Example #1
class Progress_Bar(object):
    def __init__(self):
        self.image_shape = (15, 640, 3)
        self.empty_color = (230, 230, 230)
        self.fill_color = (0, 0, 200)
        self.base_array = 255 * numpy.ones(self.image_shape, dtype=numpy.uint8)
        for i in range(0, 3):
            self.base_array[:, :, i] = self.empty_color[i]
        self.bridge = CvBridge()
        rospy.init_node('progress_bar')

        # Publications
        self.pub = rospy.Publisher('image_progress_bar', Image)

        # Subscriptions
        self.sub = rospy.Subscriber('progress', ProgressMsg,
                                    self.handle_progress_msg)

    def handle_progress_msg(self, data):
        frame_count = data.frame_count
        progress_t = data.progress_t
        record_t = data.record_t
        image_array = numpy.array(self.base_array)
        if record_t > 0:
            fill_ind = int(self.image_shape[1] * progress_t / record_t)
        else:
            fill_ind = self.image_shape[1]
        for i in range(0, 3):
            image_array[:, :fill_ind, i] = self.fill_color[i]
        cv_image = cv.fromarray(image_array)
        rosimage = self.bridge.cv_to_imgmsg(cv_image, 'rgb8')
        self.pub.publish(rosimage)

    def run(self):
        rospy.spin()
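
A minimal entry point for running this node, sketched as an assumption since the surrounding script is not shown:

# Hypothetical entry point; assumes the module-level imports used by the
# class (rospy, numpy, the cv bindings, CvBridge, Image, ProgressMsg).
if __name__ == '__main__':
    node = Progress_Bar()
    node.run()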
Example #2
class FPS_Image(object):

    def __init__(self):
        self.bridge = CvBridge()
        self.font = PILImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 20)
        self.fill = (0,0,0)
        self.bg_color = (255,255,255)
        #self.bg_color = (200,200,255)
        self.image_size = (105,28)
        self.text_pos = (0,4)
        rospy.init_node('fps_info_image')

        self.pub = rospy.Publisher('image_fps', Image)
        self.sub = rospy.Subscriber('fps_info',FPSInfoMsg, self.handle_fps_msg)

    def handle_fps_msg(self,data):
        # Extract keyword arguments
        fps = data.rate

        # Create PIL image and write text to it
        pil_image = PILImage.new('RGB',self.image_size,self.bg_color)
        draw = PILImageDraw.Draw(pil_image)

        fps_text = 'fps %2.1f'%(fps,)
        draw.text(self.text_pos,fps_text,font=self.font,fill=self.fill)

        # Convert to opencv image, then to ROS image and publish
        cv_image = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_image, pil_image.tostring())
        rosimage = self.bridge.cv_to_imgmsg(cv_image,'rgb8')
        self.pub.publish(rosimage)

    def run(self):
        rospy.spin()
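
The CreateImageHeader/SetData conversion above depends on the legacy cv bindings. A rough modern alternative, sketched under the assumption that cv_bridge's cv2_to_imgmsg is available, goes through numpy instead:

# Sketch only: PIL image to ROS Image via numpy (cv2-era cv_bridge),
# replacing the CreateImageHeader/SetData pair inside handle_fps_msg.
import numpy
image_array = numpy.asarray(pil_image)  # H x W x 3 uint8 array
rosimage = self.bridge.cv2_to_imgmsg(image_array, 'rgb8')
self.pub.publish(rosimage)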
Example #3
class Progress_Info(object):

    def __init__(self):
        self.bridge = CvBridge()
        file_dir, file_name = os.path.split(__file__)
        font_path = '%s/../fonts/FreeMono.ttf'%(file_dir,)
        self.font = PILImageFont.truetype(font_path, 20)
        self.fill = (0,0,0)
        rospy.init_node('progress_info')
        self.pub = rospy.Publisher('image_progress_message', Image)
        self.sub = rospy.Subscriber('progress',ProgressMsg, self.handle_progress_msg)


    def handle_progress_msg(self,data): 
        # Extract keyword arguments
        message = data.recording_message 
        frame_count = data.frame_count 
        record_t = data.record_t 
        progress_t = data.progress_t

        # Get minutes and seconds
        record_hr, record_min, record_sec = get_hr_min_sec(record_t)
        progress_hr, progress_min, progress_sec = get_hr_min_sec(progress_t)

        # Create PIL image and write text to it
        pil_image = PILImage.new('RGB',(640,30),(255,255,255))
        draw = PILImageDraw.Draw(pil_image)

        message_text = '%s,'%(message,)
        draw.text((0,10),message_text,font=self.font,fill=self.fill)

        frame_text = 'frame %d'%(frame_count,)
        draw.text((160,10),frame_text,font=self.font,fill=self.fill)

        time_data = (progress_hr,progress_min,progress_sec,record_hr,record_min,record_sec)
        time_text = '%02d:%02d:%02d/%02d:%02d:%02d'%time_data
        draw.text((430,10),time_text,font=self.font,fill=self.fill)

        # Convert to opencv image, then to ROS image and publish
        cv_image = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_image, pil_image.tostring())
        rosimage = self.bridge.cv_to_imgmsg(cv_image,'rgb8')
        self.pub.publish(rosimage)

    def run(self):
        rospy.spin()
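
Example 3 calls get_hr_min_sec, which is defined elsewhere in the package. A plausible sketch, assuming it splits a duration in seconds into hours, minutes, and seconds:

def get_hr_min_sec(t):
    # Hypothetical helper: split a duration in seconds into integer
    # (hours, minutes, seconds).
    t = int(t)
    hr, rem = divmod(t, 3600)
    mn, sec = divmod(rem, 60)
    return hr, mn, sec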
Example #4
class MCT_CalibrationNode(CalibrationNode):
    def __init__(self, *args, **kwargs):
        self.lock = threading.Lock()
        self.calibrate = False
        super(MCT_CalibrationNode, self).__init__(*args, **kwargs)
        self.bridge = CvBridge()
        self.cal_img_pub = rospy.Publisher('image_calibrator',
                                           sensor_msgs.msg.Image)

        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2)
        self.font_color_red = cv.CV_RGB(255, 0, 0)
        self.font_color_green = cv.CV_RGB(0, 255, 0)
        self.font_color_blue = cv.CV_RGB(80, 80, 255)
        self.x_start = 10
        self.y_start = 30
        self.x_step = 140
        self.y_step = 32

        # Set up services
        node_name = rospy.get_name()
        self.good_enough_srv = rospy.Service(
            '{0}/good_enough'.format(node_name),
            GetBool,
            self.handle_good_enough_srv,
        )

        self.calibrate_srv = rospy.Service(
            '{0}/calibrate'.format(node_name),
            GetBool,
            self.handle_calibrate_srv,
        )

        self.get_calibration_srv = rospy.Service(
            '{0}/get_calibration'.format(node_name),
            GetString,
            self.handle_get_calibration_srv,
        )

    def handle_good_enough_srv(self, req):
        """
        Handles requests for the calibrator's good_enough flag, which indicates
        whether or not sufficient data has been acquired for calibrating the
        camera.
        """
        with self.lock:
            good_enough = self.c.goodenough
        return GetBoolResponse(good_enough)

    def handle_calibrate_srv(self, req):
        """
        Handles requests for calibrating the cameras associated with the calibrator.
        """
        with self.lock:
            if self.c.goodenough:
                self.calibrate = True
                flag = True
            else:
                flag = False
        return GetBoolResponse(flag)

    def handle_get_calibration_srv(self, req):
        """
        Handles requests for the camera calibration parameters.
        """
        with self.lock:
            if self.c.calibrated:
                ost_txt = self.c.ost()
            else:
                ost_txt = ''

        if ost_txt:
            # Add actual camera name to calibration
            camera_topic = rospy.remap_name('camera')
            camera_name = camera_topic.split('/')[2]
            ost_txt = ost_txt.replace('narrow_stereo/left', camera_name)

        return GetStringResponse(ost_txt)

    def redraw_monocular(self, drawable):
        """
        Redraw calibration image callback.
        """
        if not self.c.calibrated:

            with self.lock:
                if self.calibrate and self.c.goodenough:
                    self.c.do_calibration()

            if self.c.goodenough:
                text_data = [('Qty', '{0}'.format(len(self.c.db))),
                             ('Good Enough', )]
                self.add_progress_text(drawable.scrib, text_data,
                                       self.font_color_green)
            else:
                if drawable.params:
                    text_data = [('Qty', '{0}'.format(len(self.c.db)))] + list(
                        drawable.params)
                    self.add_progress_text(drawable.scrib, text_data,
                                           self.font_color_red)
                else:
                    text_data = [('No Data', )]
                    self.add_progress_text(drawable.scrib, text_data,
                                           self.font_color_red)

        else:
            text_data = [('Calibrated', )]
            self.add_progress_text(drawable.scrib, text_data,
                                   self.font_color_green)
            #print self.c.ost()

        rosimage = self.bridge.cv_to_imgmsg(drawable.scrib, 'bgr8')
        self.cal_img_pub.publish(rosimage)

    def add_progress_text(self, img, text_data, font_color):
        """
        Adds progress text to calibration image, img. The text to be added
        consists of a list of tuples where each tuple is a row of text to be
        added.
        """
        for i, values in enumerate(text_data):
            for j, item in enumerate(values):
                if type(item) == float:
                    msg = '{0:1.2f}'.format(item)
                else:
                    msg = '{0}'.format(item)
                pos = (self.x_start + j * self.x_step,
                       self.y_start + i * self.y_step)
                cv.PutText(img, msg, pos, self.font, font_color)
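
A client of these services might query the calibrator as follows. This is a sketch: the node name is hypothetical, the srv module path is assumed, and the response field name is inferred from GetBoolResponse above.

# Hypothetical client-side usage of the calibrator services.
import rospy
from mct_msg_and_srv.srv import GetBool  # assumed srv module

rospy.wait_for_service('/camera_calibrator/good_enough')
good_enough = rospy.ServiceProxy('/camera_calibrator/good_enough', GetBool)
if good_enough().value:  # response field name is an assumption
    calibrate = rospy.ServiceProxy('/camera_calibrator/calibrate', GetBool)
    calibrate()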
Example #5
class ThreePointTracker(object):
    def __init__(self, topic=None):
        self.topic = topic
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.camera = get_camera_from_topic(self.topic)
        camera_calibration = file_tools.read_camera_calibration(self.camera)
        self.camera_matrix = get_camera_matrix(camera_calibration)

        # Blob finder parameters
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = 150
        self.blobFinder.filter_by_area = False
        self.blobFinder.min_area = 0
        self.blobFinder.max_area = 200

        # Tracking parameters
        self.tracking_pts_dist = (0.0, 0.04774, 0.07019)
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]

        rospy.init_node('blob_finder')
        self.ready = False
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)

        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)
        self.image_blobs_pub = rospy.Publisher('image_blobs', Image)
        self.blob_data_pub = rospy.Publisher('blob_data', BlobData)

        self.devel_pub = rospy.Publisher('devel_data', Float32)

        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service('{0}/set_param'.format(node_name),
                                           BlobFinderSetParam,
                                           self.handle_set_param_srv)
        self.get_param_srv = rospy.Service('{0}/get_param'.format(node_name),
                                           BlobFinderGetParam,
                                           self.handle_get_param_srv)
        self.ready = True

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters: threshold,
        filter_by_area, min_area, and max_area.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True, '')

    def handle_get_param_srv(self, req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def image_callback(self, data):
        """
        Callback for image topic subscription - finds blobs in image.
        """
        if not self.ready:
            return

        with self.lock:
            blobs_list, blobs_rosimage = self.blobFinder.findBlobs(data)

        # ---------------------------------------------------------------------

        # Create tracking points image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        tracking_pts_image = cv.CreateImage(cv.GetSize(ipl_image),
                                            cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, tracking_pts_image, cv.CV_GRAY2BGR)

        #blobs_list = get_synthetic_blobs(self.tracking_pts_dist, self.camera_matrix)
        num_blobs = len(blobs_list)
        if num_blobs == 3:

            #print('3 pts found')
            uv_list = self.get_sorted_uv_points(blobs_list)
            t, w = get_object_pose(1.0, 2.2, uv_list, self.tracking_pts_dist,
                                   self.camera_matrix)
            uv_list_pred = get_uv_list_from_vects(t, w, self.tracking_pts_dist,
                                                  self.camera_matrix)
            elev = math.asin(w[2])
            head = math.atan2(w[1], w[0])
            elev_deg = 180.0 * elev / math.pi
            head_deg = 180.0 * head / math.pi

            #print('w', ['{0:1.3f}'.format(x) for x in w])
            #print('t', ['{0:1.3f}'.format(x) for x in t])
            print('head: {0:1.3f}'.format(head_deg))
            print('elev: {0:1.3f}'.format(elev_deg))
            print('tx:   {0:1.3f}'.format(t[0]))
            print('ty:   {0:1.3f}'.format(t[1]))
            print('tz:   {0:1.3f}'.format(t[2]))
            print()
            self.devel_pub.publish(t[2])

            # Draw points on tracking points image
            for i, uv in enumerate(uv_list):
                color = self.tracking_pts_colors[i]
                u, v = uv
                #cv.Circle(tracking_pts_image, (int(u),int(v)),3, color)
                cv.Circle(tracking_pts_image, (int(u), int(v)), 3, (0, 255, 0))

            for i, uv in enumerate(uv_list_pred):
                color = self.tracking_pts_colors[i]
                u, v = uv
                #cv.Circle(tracking_pts_image, (int(u),int(v)),3, color)
                cv.Circle(tracking_pts_image, (int(u), int(v)), 3, (0, 0, 255))
        else:
            print('3 pts not found ', end='')
            if num_blobs > 3:
                print('too many blobs')
            else:
                print('too few blobs')

        # Publish calibration progress image
        ros_image = self.bridge.cv_to_imgmsg(tracking_pts_image,
                                             encoding="passthrough")
        self.image_tracking_pts_pub.publish(ros_image)

        # ---------------------------------------------------------------------

        ## Publish image of blobs
        #self.image_blobs_pub.publish(blobs_rosimage)

        ## Create the blob data message and publish
        #blob_data_msg = BlobData()
        #blob_data_msg.header = data.header
        #blob_data_msg.number_of_blobs = len(blobs_list)
        #for b in blobs_list:
        #    blob_msg = Blob()
        #    for k, v in b.iteritems():
        #        setattr(blob_msg,k,v)
        #    blob_data_msg.blob.append(blob_msg)
        #self.blob_data_pub.publish(blob_data_msg)

    def get_object_pose(self, uv_list):
        """
        Calculates pose of three point object
        """
        pass

    def get_sorted_uv_points(self, blobs_list):
        """
        For blob lists with three blobs finds the identities of the blobs based on
        colinearity and distance between the points.
        """
        assert len(blobs_list) == 3, 'blobs list must contain exactly three blobs'
        # Get x and y point lists
        u_list = [blob['centroid_x'] for blob in blobs_list]
        v_list = [blob['centroid_y'] for blob in blobs_list]

        # Determine x and y span
        u_min, u_max = min(u_list), max(u_list)
        v_min, v_max = min(v_list), max(v_list)
        u_span = u_max - u_min
        v_span = v_max - v_min

        # Use max span to sort points
        if u_span >= v_span:
            uv_list = sorted(zip(u_list, v_list))
            u_list = [u for u, v in uv_list]
            v_list = [v for u, v in uv_list]
        else:
            vu_list = sorted(zip(v_list, u_list))
            u_list = [u for v, u in vu_list]
            v_list = [v for v, u in vu_list]

        # #####################################################################
        # Look at distances and sort so that the large gap occurs first
        # Note currently assuming the largest gap occurs first this should
        # really come from the tracking_pts_pos
        # #####################################################################
        uv_list = list(zip(u_list, v_list))
        dist_0_to_1 = distance_2d(uv_list[0], uv_list[1])
        dist_1_to_2 = distance_2d(uv_list[1], uv_list[2])
        if dist_0_to_1 <= dist_1_to_2:
            uv_list.reverse()
        return uv_list

    def run(self):
        rospy.spin()
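
get_sorted_uv_points relies on a distance_2d helper that is not shown; a minimal sketch:

import math

def distance_2d(p, q):
    # Hypothetical helper: Euclidean distance between two (x, y) points.
    return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)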
Example #6
class ZoomToolNode(object):

    def __init__(self,topic=None):
        self.topic = topic
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        rospy.init_node('zoom_tool')
        node_name = rospy.get_name()

        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = rospy.get_param('/zoom_tool_params/threshold', 200)
        self.blobFinder.filter_by_area = rospy.get_param('/zoom_tool_params/filter_by_area', False) 
        self.blobFinder.min_area = rospy.get_param('/zoom_tool_params/min_area', 0) 
        self.blobFinder.max_area = rospy.get_param('/zoom_tool_params/max_area', 200) 

        self.circle_color = (0,0,255)
        self.text_color = (0,0,255)
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX, 0.8, 0.8,thickness=1)

        self.image_sub = rospy.Subscriber(self.topic,Image,self.image_callback)
        self.image_pub = rospy.Publisher('image_zoom_tool', Image)
        #self.devel_pub = rospy.Publisher('develop', Float32)

        self.set_param_srv = rospy.Service( 
                '{0}/set_param'.format(node_name), 
                BlobFinderSetParam, 
                self.handle_set_param_srv
                )
        self.get_param_srv = rospy.Service( 
                '{0}/get_param'.format(node_name), 
                BlobFinderGetParam, 
                self.handle_get_param_srv
                )

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters: threshold,
        filter_by_area, min_area, and max_area.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True,'')

    def handle_get_param_srv(self,req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return  BlobFinderGetParamResponse(*resp_args)


    def image_callback(self,data):
        """
        Callback for image topic subscription - finds blobs in image.
        """

        #t0 = rospy.Time.now()
        with self.lock:
            blobs_list, blobs_image = self.blobFinder.findBlobs(data, create_image=True)

        if len(blobs_list) == 2:

            # If two blobs are found, compute the distance between them and
            # show it on the published image.
            x0 = blobs_list[0]['centroid_x']
            y0 = blobs_list[0]['centroid_y']
            x1 = blobs_list[1]['centroid_x']
            y1 = blobs_list[1]['centroid_y']
            point_list = [(x0,y0),(x1,y1)]
            dist = math.sqrt((x0-x1)**2 + (y0-y1)**2)
            message = 'dist = {0:1.1f} px'.format(dist)
            cv.PutText(blobs_image,message,(10,25),self.cv_text_font,self.text_color)

            ## Publish calibration progress image
            blobs_rosimage = self.bridge.cv_to_imgmsg(blobs_image,encoding="passthrough")
            self.image_pub.publish(blobs_rosimage)
        else:
            self.image_pub.publish(data)

        #t1 = rospy.Time.now()
        #dt = t1.to_sec() - t0.to_sec()
        #print(1.0/dt)
        #self.devel_pub.publish(dt)


    def run(self):
        rospy.spin()
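
Adjusting the zoom tool's blob finder at runtime could look like the following sketch; the srv module path is an assumption and the request field order mirrors handle_set_param_srv above.

# Hypothetical client call to the zoom tool's set_param service.
import rospy
from mct_msg_and_srv.srv import BlobFinderSetParam  # assumed srv module

rospy.wait_for_service('/zoom_tool/set_param')
set_param = rospy.ServiceProxy('/zoom_tool/set_param', BlobFinderSetParam)
set_param(200, False, 0, 200)  # threshold, filter_by_area, min_area, max_area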
Example #7
        time.sleep(0.5)
        bridge = CvBridge()

        got_pose_dict = {'flag': False, 'pose_fail': False}
        topic_name_cb = '/checkerdetector/ObjectDetection'
        rospy.Subscriber(topic_name_cb, ObjectDetection, got_pose_cb,
                         got_pose_dict)

        failed_im_list = [] # list of filenames on which checkerboard detection failed.
        n_images = len(im_name_list)
        for i in range(n_images):
            name = im_name_list[i]
            cv_im = cv.LoadImage(name)

            rosimage = bridge.cv_to_imgmsg(cv_im, "bgr8")
            rosimage.header.stamp = rospy.Time.from_sec(time_list[i])

            image_pub.publish(rosimage)
            config_pub.publish(CameraInfo(P=intrinsic_list))

            t_st = time.time()
            while not got_pose_dict['flag']:
                time.sleep(0.5)
                if (time.time()-t_st) > 10.:
                    break

            if got_pose_dict['pose_fail']:
                failed_im_list.append(name)

            time.sleep(0.5)
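
The got_pose_cb callback registered above is not shown. A plausible sketch, assuming the ObjectDetection message carries a list of detected objects:

def got_pose_cb(data, got_pose_dict):
    # Hypothetical callback: note that a detection message arrived and
    # whether the checkerboard was found ('objects' field is assumed).
    got_pose_dict['flag'] = True
    got_pose_dict['pose_fail'] = len(data.objects) == 0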
Example #8
        print('representing opencv\'s index for ')
        print('the particular device')

    camera_id = int(sys.argv[1])
    topic_name = 'cvcamera' + str(camera_id)

    image_pub = rospy.Publisher(topic_name, Image)
    rospy.init_node('cvcamera', anonymous=True)

    capture = cv.CaptureFromCAM(camera_id)
    bridge = CvBridge()

    print('Opening OpenCV camera with ID {0}'.format(camera_id))
    print('Publishing on topic {0}'.format(topic_name))
    while not rospy.is_shutdown():
        try:
            cv_image = cv.CloneImage(cv.QueryFrame(capture))
            rosimage = bridge.cv_to_imgmsg(cv_image, "bgr8")
            image_pub.publish(rosimage)
        except rospy.exceptions.ROSSerializationException as e:
            print('serialization exception')
        except CvBridgeError as e:
            print(e)
            break
        except KeyboardInterrupt:
            print "Shutting down."
            break
        time.sleep(1/100.0)

    cv.DestroyAllWindows()
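
Example 8 is written against the legacy cv capture API, which exists only in old OpenCV Python bindings. A rough cv2-era equivalent of the capture loop, offered as a sketch rather than a drop-in replacement:

# Sketch: cv2-era version of the capture loop above, reusing
# camera_id and image_pub from the surrounding script.
import cv2
import rospy
from cv_bridge import CvBridge

capture = cv2.VideoCapture(camera_id)
bridge = CvBridge()
while not rospy.is_shutdown():
    ok, frame = capture.read()
    if ok:
        image_pub.publish(bridge.cv2_to_imgmsg(frame, 'bgr8'))
    rospy.sleep(1 / 100.0)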
Example #9
class TimeStampWatchdog(object):
    """
    Watchdog for hardware triggered cameras. Checks for correct sequence order
    and maximum allowed time stamp error.
    """
    def __init__(self, frame_rate, max_allowed_error=10.0e-3, max_seq_age=200):

        self.frame_rate = frame_rate

        self.lock = threading.Lock()
        self.max_allowed_error = max_allowed_error
        self.max_seq_age = max_seq_age

        self.seq_to_camera_stamps = {}
        self.stamp_last = {}

        self.max_error_by_camera = {}
        self.max_error = 0.0
        self.camera_fail = ''

        self.most_recent_seq = None
        self.last_checked_seq = None

        self.ok = True
        self.seq_fail = 0
        self.error_msg = ''

        self.bridge = CvBridge()
        self.info_image_size = (400, 90)
        self.font = PILImageFont.truetype(
            "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)

        self.ready = False
        rospy.init_node('time_stamp_watchdog')

        # Subscribe to camera info topics
        self.wait_for_camera_info_topics()
        camera_info_topics = mct_introspection.find_camera_info_topics()
        self.camera_info_sub = {}
        for topic in camera_info_topics:
            camera = get_camera_from_topic(topic)
            handler = functools.partial(self.camera_info_handler, camera)
            self.camera_info_sub[camera] = rospy.Subscriber(
                topic, CameraInfo, handler)
        self.number_of_cameras = len(camera_info_topics)

        # Create reset service
        self.reset_srv = rospy.Service('time_stamp_watchdog_reset', Empty,
                                       self.reset_handler)

        # Create time stamp watchdog publication
        self.watchdog_pub = rospy.Publisher('time_stamp_watchdog',
                                            TimeStampWatchDog)

        # Create watchdog info image
        self.image_watchdog_pub = rospy.Publisher('image_time_stamp_watchdog',
                                                  Image)

        self.ready = True

    def wait_for_camera_info_topics(self):
        """
        Wait until the number of camera info topics is equal to the number of
        cameras in the current camera assignment.
        """
        camera_assignment = file_tools.read_camera_assignment()
        number_of_cameras = len(camera_assignment)
        while 1:
            camera_info_topics = mct_introspection.find_camera_info_topics()
            if len(camera_info_topics) == number_of_cameras:
                break
            rospy.sleep(0.25)

    def reset_handler(self, req):
        """
        Handles reset service
        """
        with self.lock:
            self.seq_to_camera_stamps = {}
            self.stamp_last = {}
            self.max_error_by_camera = {}
            self.max_error = 0.0
            self.most_recent_seq = None
            self.last_checked_seq = None
            self.ok = True
            self.seq_fail = 0
            self.error_msg = ''
            self.camera_fail = ''
        return EmptyResponse()

    def camera_info_handler(self, camera, data):
        """
        Handler for camera info messages. Stores the time stamps based on
        sequence number and camera.
        """
        if not self.ready:
            return

        with self.lock:
            seq = data.header.seq
            stamp = data.header.stamp
            try:
                self.seq_to_camera_stamps[seq][camera] = stamp
            except KeyError:
                self.seq_to_camera_stamps[seq] = {camera: stamp}
            self.most_recent_seq = seq

    def check_camera_stamps(self, seq, camera_stamps):
        """
        Checks the time stamps for a given sequence number. Examines the
        sequence number to ensure it is the next expected value and examines
        the spread of the time stamps to ensure that it is less than the
        allowed maximum. The results are published on the watchdog topic.
        """
        cur_seq_ok = True
        error_list = []
        # Check for correct sequence order
        if self.last_checked_seq is not None:
            if seq != self.last_checked_seq + 1:
                cur_seq_ok = False
                self.seq_fail = seq
                error_list.append('sequence out of order')
        self.last_checked_seq = seq

        # Get the max and min time stamps for cameras on the same machine
        stamp_delta = {}
        stamp_error = {}

        for camera, stamp in camera_stamps.iteritems():
            # Get difference between current and last time stamps and compute the stamp error
            try:
                stamp_delta[camera] = stamp.to_sec(
                ) - self.stamp_last[camera].to_sec()
            except KeyError:
                stamp_delta[camera] = 1.0 / self.frame_rate
            stamp_error[camera] = abs(1.0 / self.frame_rate -
                                      stamp_delta[camera])

            # Find the maximum error so far for each camera
            try:
                self.max_error_by_camera[camera] = max(
                    [self.max_error_by_camera[camera], stamp_error[camera]])
            except KeyError:
                self.max_error_by_camera[camera] = stamp_error[camera]

            self.stamp_last[camera] = stamp

        # Compute the maximum error so far for all cameras
        self.max_error = max(
            [err for err in self.max_error_by_camera.values()])
        if self.max_error > self.max_allowed_error:
            camera_fail_list = [
                cam for cam, err in self.max_error_by_camera.items()
                if err > self.max_allowed_error
            ]
            self.camera_fail = str(camera_fail_list)
            cur_seq_ok = False
            error_list.append('time stamp outside of expected range')

        # If this is the first seq with an error set the error message
        if self.ok and not cur_seq_ok:
            self.ok = False
            self.seq_fail = seq
            self.error_msg = ', '.join(error_list)

        # Publish watchdog message
        watchdog_msg = TimeStampWatchDog()
        watchdog_msg.seq = seq
        watchdog_msg.ok = self.ok
        watchdog_msg.frame_rate = self.frame_rate
        watchdog_msg.max_allowed_error = self.max_allowed_error
        watchdog_msg.max_error = self.max_error
        watchdog_msg.seq_fail = self.seq_fail
        watchdog_msg.error_msg = self.error_msg
        watchdog_msg.camera_fail = self.camera_fail
        self.watchdog_pub.publish(watchdog_msg)

        # Publish watchdog image
        pil_info_image = PILImage.new('RGB', self.info_image_size,
                                      (255, 255, 255))
        draw = PILImageDraw.Draw(pil_info_image)
        info_items = [('ok', 'ok', ''), ('seq', 'seq', ''),
                      ('max_error', 'max error', 's')]
        text_x, text_y, step_y = 10, 10, 20
        for i, item in enumerate(info_items):
            name, label, units = item
            value = getattr(watchdog_msg, name)
            label_text = '{0}:'.format(label)
            if type(value) == float:
                value_text = '{0:<1.6f}'.format(value)
            else:
                value_text = '{0}'.format(value)
            units_text = '{0}'.format(units)
            draw.text((text_x, text_y + step_y * i),
                      label_text,
                      font=self.font,
                      fill=(0, 0, 0))
            draw.text((text_x + 100, text_y + step_y * i),
                      value_text,
                      font=self.font,
                      fill=(0, 0, 0))
            draw.text((text_x + 180, text_y + step_y * i),
                      units_text,
                      font=self.font,
                      fill=(0, 0, 0))

        if self.error_msg:
            error_text = 'Error: {0}'.format(self.error_msg)
        else:
            error_text = ''

        draw.text(
            (text_x, text_y + len(info_items) * step_y),
            error_text,
            font=self.font,
            fill=(255, 0, 0),
        )

        cv_info_image = cv.CreateImageHeader(pil_info_image.size,
                                             cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_info_image, pil_info_image.tostring())

        # Convert to a rosimage and publish
        info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image, 'rgb8')
        self.image_watchdog_pub.publish(info_rosimage)

    def process_camera_stamps(self):
        """
        Processes the camera time stamps. First, looks to see if all the images
        have been received for a given sequence number. Second, checks if any
        of the sequences are older than the maximum allowed age and, if so,
        throws them away.
        """
        for seq, camera_stamps in sorted(self.seq_to_camera_stamps.items()):
            if len(camera_stamps) == self.number_of_cameras:
                self.check_camera_stamps(seq, camera_stamps)
                del self.seq_to_camera_stamps[seq]
            else:
                if self.most_recent_seq is not None:
                    seq_age = self.most_recent_seq - seq
                    if seq_age > self.max_seq_age:
                        del self.seq_to_camera_stamps[seq]

    def run(self):
        """
        Node main loop. Repeatedly processes the camera time stamps while the
        node is running.
        """
        while not rospy.is_shutdown():
            with self.lock:
                self.process_camera_stamps()
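
get_camera_from_topic appears in several of these examples without a definition. A plausible sketch, assuming topic names follow the '/<machine>/<camera>/...' layout implied by the split in Example 4:

def get_camera_from_topic(topic):
    # Hypothetical helper: pull the camera name out of a topic such as
    # '/mct_master/camera_1/camera_info'; the index is an assumption.
    return topic.split('/')[2]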
Example #10
class Frame_Drop_Corrector(object):
    """
    Frame drop corrector node. Subscribes to the given image topic and detects
    dropped frames. Dummy frames are inserted for any frames which are dropped.
    """
    def __init__(self,
                 topic,
                 framerate,
                 tol=0.005,
                 max_stamp_age=1.5,
                 publish_image_corr=True):
        self.ready = False
        self.topic = topic
        self.framerate = framerate
        self.tol = tol
        self.publish_image_corr = publish_image_corr
        self.lock = threading.Lock()
        self.camera_info_topic = get_camera_info_from_image_topic(self.topic)
        self.max_stamp_age = max_stamp_age
        self.bridge = CvBridge()
        self.last_pub_stamp = None
        self.latest_stamp = None
        self.dummy_image = None
        self.seq_offset = None
        self.frame_drop_list = []

        # Data dictionaries for synchronizing tracking data with image seq number
        self.stamp_to_image = {}
        self.stamp_to_seq = {}
        self.seq_to_stamp_and_image = {}

        rospy.init_node('frame_drop_corrector')

        # Subscribe to image and camera info topics
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)
        self.info_sub = rospy.Subscriber(self.camera_info_topic, CameraInfo,
                                         self.camera_info_callback)

        # Set up image publisher
        topic_split = self.topic.split('/')
        seq_and_image_topic = topic_split[:-1]
        seq_and_image_topic.append('seq_and_image_corr')
        seq_and_image_topic = '/'.join(seq_and_image_topic)
        self.seq_and_image_repub = rospy.Publisher(seq_and_image_topic,
                                                   SeqAndImage)
        if self.publish_image_corr:
            image_topic = topic_split[:-1]
            image_topic.append('image_corr')
            image_topic = '/'.join(image_topic)
            self.image_repub = rospy.Publisher(image_topic, Image)

        # Set up dropped frame count publisher
        frames_dropped_topic = topic_split[:-1]
        frames_dropped_topic.append('frames_dropped')
        frames_dropped_topic = '/'.join(frames_dropped_topic)
        self.frames_dropped_pub = rospy.Publisher(frames_dropped_topic,
                                                  FramesDropped)

        # Set up frame drop info service - returns list of seq numbers
        # corresponding to the dropped frames for image stream.
        frame_drop_srv_name = topic_split[:-1]
        frame_drop_srv_name.append('frame_drop_info')
        frame_drop_srv_name = '/'.join(frame_drop_srv_name)
        self.frame_drop_srv = rospy.Service(frame_drop_srv_name, FrameDropInfo,
                                            self.handle_frame_drop_srv)

        # Setup reset service - needs to be called anytime the camera trigger is
        # stopped. It resets the last_pub_stamp to none.
        reset_srv_name = topic_split[:-1]
        reset_srv_name.append('frame_drop_reset')
        reset_srv_name = '/'.join(reset_srv_name)
        self.reset_srv = rospy.Service(reset_srv_name, Empty,
                                       self.handle_reset_srv)
        self.ready = True

    def handle_reset_srv(self, req):
        """
        Resets the frame drop detection node. This prevents the addition of a huge
        number of 'false' dropped frames when the system is restarted.
        """
        with self.lock:
            self.last_pub_stamp = None
            self.seq_offset = None
            self.frame_drop_list = []
        return EmptyResponse()

    def handle_frame_drop_srv(self, req):
        """
        Returns a list of sequence numbers for all dropped frames.
        """
        with self.lock:
            frame_drop_list = list(self.frame_drop_list)
        return FrameDropInfoResponse(frame_drop_list)

    def camera_info_callback(self, data):
        """
        Callback for camera info topic subscription - used to associate stamp
        with the image seq number.
        """
        if not self.ready:
            return
        stamp_tuple = data.header.stamp.secs, data.header.stamp.nsecs
        with self.lock:
            self.latest_stamp = stamp_tuple
            self.stamp_to_seq[stamp_tuple] = data.header.seq

    def image_callback(self, data):
        """
        Callback for image topic subscription - used to associate the stamp
        tuple with the incoming image.
        """
        if not self.ready:
            return
        stamp_tuple = data.header.stamp.secs, data.header.stamp.nsecs
        with self.lock:
            self.stamp_to_image[stamp_tuple] = data

            if self.dummy_image is None:
                # Create dummy image for use when a dropped frame occurs.
                cv_image = self.bridge.imgmsg_to_cv(
                    data, desired_encoding="passthrough")
                raw_image = cv.GetImage(cv_image)
                dummy_cv_image = cv.CreateImage(cv.GetSize(raw_image),
                                                raw_image.depth,
                                                raw_image.channels)
                cv.Zero(dummy_cv_image)
                self.dummy_image = self.bridge.cv_to_imgmsg(
                    dummy_cv_image, encoding="passthrough")

    def associate_image_w_seq(self):
        """
        Associate images with image seq numbers.
        """
        with self.lock:
            for stamp, image in self.stamp_to_image.items():
                try:
                    seq = self.stamp_to_seq[stamp]
                    seq_found = True
                except KeyError:
                    seq_found = False

                if seq_found:
                    self.seq_to_stamp_and_image[seq] = stamp, image
                    try:
                        del self.stamp_to_image[stamp]
                    except KeyError:
                        pass
                    try:
                        del self.stamp_to_seq[stamp]
                    except KeyError:
                        pass

                else:
                    # Throw away data older than the maximum allowed age
                    if self.latest_stamp is not None:
                        latest_stamp_secs = stamp_tuple_to_secs(
                            self.latest_stamp)
                        stamp_secs = stamp_tuple_to_secs(stamp)
                        stamp_age = latest_stamp_secs - stamp_secs
                        if stamp_age > self.max_stamp_age:
                            try:
                                del self.stamp_to_image[stamp]
                            except KeyError:
                                pass

    def republish_seq_and_image(self):
        """
        Republishes the sequence numbers and images with black frames inserted for
        any dropped frames which are discovered by looking at the time stamps.
        """
        for seq, stamp_and_image in sorted(
                self.seq_to_stamp_and_image.items()):
            stamp_tuple, image = stamp_and_image

            if self.seq_offset is None:
                # This is the first run or reset has been called. In either case
                # set the seq offset so that the seq numbers begin with 1.
                self.seq_offset = 1 - seq

            if self.last_pub_stamp is not None:
                dt = stamp_dt_secs(stamp_tuple, self.last_pub_stamp)
                #framegap = int(round(dt*self.framerate))
                framegap = approx_frame_number(dt, 1.0 / self.framerate,
                                               self.tol)

                for i in range(framegap - 1):
                    # Note, framegap > 1 implies that we have drop frames.
                    # Publish dummies frames until we are caught up.
                    dummy_seq = seq + self.seq_offset
                    dummy_stamp_tuple = incr_stamp_tuple(
                        self.last_pub_stamp, (i + 1) / self.framerate)
                    dummy_image = self.dummy_image
                    dummy_image.header.seq = dummy_seq
                    dummy_image.header.stamp = rospy.Time(*dummy_stamp_tuple)
                    self.seq_and_image_repub.publish(dummy_seq, dummy_image)
                    self.frames_dropped_pub.publish(dummy_seq,
                                                    len(self.frame_drop_list))
                    if self.publish_image_corr:
                        self.image_repub.publish(dummy_image)
                    self.seq_offset += 1
                    self.frame_drop_list.append(dummy_seq)

            # Publish new frame with corrected sequence number - to account for dropped frames
            corrected_seq = seq + self.seq_offset
            image.header.seq = corrected_seq
            image.header.stamp = rospy.Time(*stamp_tuple)
            self.seq_and_image_repub.publish(corrected_seq, image)
            self.frames_dropped_pub.publish(corrected_seq,
                                            len(self.frame_drop_list))
            if self.publish_image_corr:
                self.image_repub.publish(image)
            self.last_pub_stamp = stamp_tuple
            del self.seq_to_stamp_and_image[seq]

    def run(self):
        """
        Main loop. Associates images and time stamps w/ image sequence numbers.
        Examines time interval between frames to detect dropped frames.
        Re-publishes sequences and images as combined SeqAndImage topic. When a
        dropped frame is detected a blank "dummy" frame is inserted in its place.
        """
        while not rospy.is_shutdown():
            self.associate_image_w_seq()
            self.republish_seq_and_image()
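
The corrector leans on several small stamp helpers (stamp_tuple_to_secs, stamp_dt_secs, approx_frame_number, incr_stamp_tuple) that are defined elsewhere. Plausible sketches, consistent with how they are called above:

def stamp_tuple_to_secs(stamp):
    # Convert a (secs, nsecs) tuple to floating point seconds.
    secs, nsecs = stamp
    return secs + 1.0e-9 * nsecs

def stamp_dt_secs(stamp_1, stamp_0):
    # Difference, in seconds, between two stamp tuples.
    return stamp_tuple_to_secs(stamp_1) - stamp_tuple_to_secs(stamp_0)

def approx_frame_number(dt, frame_dt, tol):
    # Hypothetical: number of frame periods spanned by dt. The tol
    # argument would guard against jitter; this sketch just rounds.
    return int(round(dt / frame_dt))

def incr_stamp_tuple(stamp, dt):
    # Increment a (secs, nsecs) stamp tuple by dt seconds.
    total = stamp_tuple_to_secs(stamp) + dt
    secs = int(total)
    nsecs = int(round(1.0e9 * (total - secs)))
    return secs, nsecs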
Example #11
class Transform_2D_Calibrator(object):
    def __init__(self, topic_0, topic_1):

        self.ready = False
        self.state = WAITING  # There are 3 allowed states WAITING, WORKING, FINISHED
        #self.state = WORKING
        self.topics = topic_0, topic_1
        self.bridge = CvBridge()
        self.lock = threading.Lock()

        rospy.init_node('transform_2d_calibrator')
        self.node_name = rospy.get_name()

        # Initialize data lists
        self.blobs = {
            self.topics[0]: [],
            self.topics[1]: [],
        }
        self.index_to_image = {
            self.topics[0]: {},
            self.topics[1]: {},
        }
        self.overlap_indices = []

        # Set active target information and turn off leds
        target_info = mct_active_target.active_target_info()
        self.led_n_max = target_info[0]
        self.led_m_max = target_info[1]
        self.number_of_leds = self.led_n_max * self.led_m_max
        self.led_max_power = target_info[2]
        self.led_space_x = target_info[3]
        self.led_space_y = target_info[3]
        mct_active_target.off()

        # Current led indices
        self.led_n = 0
        self.led_m = 0

        # Wait counter for image acquisition

        self.image_wait_cnt = {
            self.topics[0]: 0,
            self.topics[1]: 0,
        }

        # Sleep periods for idle and wait count loops
        self.idle_sleep_dt = 0.25
        self.wait_sleep_dt = 0.005

        # Led power setting
        params_namespace = '/transform_2d_calibrator_params'
        self.led_power = rospy.get_param(
            '{0}/target/led_power'.format(params_namespace), 10)

        # Wait count for image acquisition
        self.image_wait_number = rospy.get_param(
            '{0}/image_wait_number'.format(params_namespace), 4)

        # Initialize blob finder
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = rospy.get_param(
            '{0}/blob_finder/threshold'.format(params_namespace), 150)
        self.blobFinder.filter_by_area = rospy.get_param(
            '{0}/blob_finder/filter_by_area'.format(params_namespace), False)
        self.blobFinder.min_area = rospy.get_param(
            '{0}/blob_finder/min_area'.format(params_namespace), 0)
        self.blobFinder.max_area = rospy.get_param(
            '{0}/blob_finder/max_area'.format(params_namespace), 200)

        # Number of points required
        self.num_points_required = rospy.get_param(
            '{0}/num_points_required'.format(params_namespace), 10)
        self.transform = None
        self.transform_error = None

        # Get homography matrices
        self.homography_dict = {}
        for topic in self.topics:
            try:
                # Get the data from the parameter server
                rows = rospy.get_param(
                    '{0}/homography_matrix/rows'.format(topic))
                cols = rospy.get_param(
                    '{0}/homography_matrix/cols'.format(topic))
                data = rospy.get_param(
                    '{0}/homography_matrix/data'.format(topic))
            except KeyError:
                # Data is not on the parameter server - try getting it by reading the file
                camera = get_camera_from_topic(topic)
                homography = file_tools.read_homography_calibration(camera)
                rows = homography['rows']
                cols = homography['cols']
                data = homography['data']

            # Rearrange into numpy matrix
            homography_matrix = numpy.array(data)
            homography_matrix = homography_matrix.reshape((rows, cols))
            self.homography_dict[topic] = homography_matrix

        # Set font and initial image information
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.image_info = 'no data'

        # Subscription to image topic
        image_callback_0 = functools.partial(self.image_callback,
                                             self.topics[0])
        image_callback_1 = functools.partial(self.image_callback,
                                             self.topics[1])
        self.image_sub = {
            self.topics[0]:
            rospy.Subscriber(self.topics[0], Image, image_callback_0),
            self.topics[1]:
            rospy.Subscriber(self.topics[1], Image, image_callback_1),
        }

        # Publications - calibration progress images for topics 0 and 1
        self.image_pub = {
            self.topics[0]:
            rospy.Publisher('image_transform_calibration_0', Image),
            self.topics[1]:
            rospy.Publisher('image_transform_calibration_1', Image),
        }

        # Services
        self.start_srv = rospy.Service('{0}/start'.format(self.node_name),
                                       GetFlagAndMessage,
                                       self.handle_start_srv)

        self.is_calibrated_srv = rospy.Service(
            '{0}/is_calibrated'.format(self.node_name), GetBool,
            self.handle_is_calibrated_srv)

        self.get_transform_2d_srv = rospy.Service(
            '{0}/get_transform_2d'.format(self.node_name), GetTransform2d,
            self.handle_get_transform_2d_srv)

        self.ready = True

    def handle_get_transform_2d_srv(self, req):
        """
        Handles requests to get the 2d transform found by the calibration procedure.
        """
        if self.transform is None:
            rotation = 0.0
            translation_x = 0.0
            translation_y = 0.0
        else:
            rotation = self.transform['rotation']
            translation_x = self.transform['translation_x']
            translation_y = self.transform['translation_y']
        return GetTransform2dResponse(rotation, translation_x, translation_y)

    def handle_start_srv(self, req):
        """
        Handles requests to start/restart the calibration procedure.
        """
        flag = True
        message = ''
        with self.lock:
            flag, message = mct_active_target.lock(self.node_name)
            if flag:
                self.image_info = ''
                self.state = WORKING
                self.led_n = 0
                self.led_m = 0
                self.blobs = {self.topics[0]: [], self.topics[1]: []}
                self.index_to_image = {self.topics[0]: {}, self.topics[1]: {}}
                self.overlap_indices = []
                self.transform = None
                self.transform_error = None
        return GetFlagAndMessageResponse(flag, message)

    def handle_is_calibrated_srv(self, req):
        """
        Handles requests for whether or not the transform calibration has been completed.
        """
        if self.transform is not None:
            value = True
        else:
            value = False
        return GetBoolResponse(value)

    def image_callback(self, topic, data):
        """
        Image topic callback function. Uses the blobFinder to find the blobs
        (leds) in the image. Publishes images showing the calibration progress.
        """
        # Find blobs
        with self.lock:

            if not self.ready:
                return

            if self.state == WORKING:
                self.blobs[topic], blobs_image = self.blobFinder.findBlobs(
                    data)
                blobs_rosimage = self.bridge.cv_to_imgmsg(
                    blobs_image, encoding="passthrough")
            else:
                self.blobs[topic] = []

            if self.image_wait_cnt[topic] < self.image_wait_number:
                self.image_wait_cnt[topic] += 1

        # Create calibration data image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        calib_image = cv.CreateImage(cv.GetSize(ipl_image), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, calib_image, cv.CV_GRAY2BGR)

        # Add calibration markers to image
        color = STATE_COLOR[self.state]
        if self.state == WORKING:
            for image_point in self.index_to_image[topic].values():
                x, y = image_point
                cv.Circle(calib_image, (int(x), int(y)), 3, color)
        else:
            for index in self.overlap_indices:
                x, y = self.index_to_image[topic][index]
                cv.Circle(calib_image, (int(x), int(y)), 3, color)

        ## Add text to image
        message = [STATE_MESSAGE[self.state]]
        if self.state == WORKING or self.state == FINISHED:
            if self.state == WORKING:
                num_points_found = len(self.index_to_image[topic])
            else:
                num_points_found = len(self.overlap_indices)
            message.append('{0}/{1} pts'.format(num_points_found,
                                                self.get_led_count()))
        if self.image_info:
            message.append('- {0}'.format(self.image_info))
        if (self.state == FINISHED) and (self.transform_error is not None):
            message.append('- error {0:1.2f} mm'.format(1e3 *
                                                        self.transform_error))
        message = ' '.join(message)
        cv.PutText(calib_image, message, (10, 25), self.cv_text_font, color)

        # Publish calibration progress image
        calib_rosimage = self.bridge.cv_to_imgmsg(calib_image,
                                                  encoding="passthrough")
        self.image_pub[topic].publish(calib_rosimage)

    def wait_for_images(self):
        """
        Wait for 'image_wait_number' of images to be acquired.
        """
        with self.lock:
            for topic in self.topics:
                self.image_wait_cnt[topic] = 0
        done = False
        while not done:
            with self.lock:
                image_wait_cnt = min(
                    [self.image_wait_cnt[t] for t in self.topics])
            if image_wait_cnt >= self.image_wait_number:
                done = True
            rospy.sleep(self.wait_sleep_dt)

    def increment_led(self):
        """
        Increments the led array indices. When the last led is reached, checks
        whether sufficient points have been captured for the calibration. If
        so, the state is changed to FINISHED. If insufficient points have been
        gathered, the state is changed back to WAITING.
        """

        if self.led_m < self.led_m_max - 1 or self.led_n < self.led_n_max - 1:
            if self.led_n < self.led_n_max - 1:
                self.led_n += 1
            else:
                self.led_n = 0
                self.led_m += 1
        else:

            # Turn off led and unlock active target
            mct_active_target.off()
            mct_active_target.unlock(self.node_name)

            # Check that enough points were captured, otherwise return to idle.
            self.overlap_indices = self.get_overlap_indices()
            if len(self.overlap_indices) >= self.num_points_required:
                self.state = FINISHED
                self.image_info = ''
            else:
                self.state = WAITING
                self.image_info = 'not enough data'
                self.transform = None

    def get_overlap_indices(self):
        """
        Returns the indices of the overlapping points.
        """
        indices_0 = self.index_to_image[self.topics[0]]
        indices_1 = self.index_to_image[self.topics[1]]
        overlap_indices = set(indices_0)
        overlap_indices.intersection_update(indices_1)
        return list(overlap_indices)

    def get_led_count(self):
        return self.led_n + self.led_m * self.led_n_max + 1

    def save_blob_data(self):
        """
        Save blob data found in images
        """
        with self.lock:
            for topic in self.topics:
                if len(self.blobs[topic]) == 1:
                    blob = self.blobs[topic][0]
                    image_x = blob['centroid_x']
                    image_y = blob['centroid_y']
                    self.index_to_image[topic][(self.led_n,
                                                self.led_m)] = (image_x,
                                                                image_y)

    def find_transform(self):
        """
        Find the 2d transform (rotation + translation) from the world points
        found in topics[0] and topics[1] and their respective homographies.
        """
        # Get world points for both topics for all points in overlap
        world_points_dict = {}

        for topic in self.topics:

            # Get homography from image coordinates to world coordinates
            homography_matrix = self.homography_dict[topic]
            homography_matrix_t = homography_matrix.transpose()

            # Get image points for indices in overlap
            image_points = []
            for index in self.overlap_indices:
                image_points.append(self.index_to_image[topic][index])
            image_points = numpy.array(image_points)

            # Convert to homogeneous coordinates and compute world coordinates
            image_points_hg = numpy.ones((image_points.shape[0], 3))
            image_points_hg[:, :2] = image_points
            world_points_hg = numpy.dot(image_points_hg, homography_matrix_t)
            world_points = numpy.array(world_points_hg[:, :2])
            denom = numpy.zeros((world_points.shape[0], 2))
            denom[:, 0] = world_points_hg[:, 2]
            denom[:, 1] = world_points_hg[:, 2]
            world_points = world_points / denom
            world_points_dict[topic] = world_points

        # Get initial estimates of the rotation and translation
        points_0 = world_points_dict[self.topics[0]]
        points_1 = world_points_dict[self.topics[1]]
        rotation_est, translation_est = estimate_transform_2d(
            points_0, points_1)

        rotation, translation, error = fit_transform_2d(
            points_0, points_1, rotation_est, translation_est)

        self.transform = {
            'rotation': rotation,
            'translation_x': translation[0],
            'translation_y': translation[1],
        }

        self.transform_error = error

    def run(self):
        """
        Node main function. When the state is WORKING this function activates
        the leds on the active calibration target one at a time. When all of
        the leds have been activated and a sufficient number of points has
        been collected, the 2d transform is calculated.
        """

        while not rospy.is_shutdown():

            if self.state == WORKING:
                # Turn on current led and wait for images
                mct_active_target.set_led(self.led_n, self.led_m,
                                          self.led_power)
                self.wait_for_images()
                self.save_blob_data()
                self.increment_led()

            elif self.state == FINISHED and self.transform is None:
                # Calculate 2d transforms
                self.find_transform()
            else:
                rospy.sleep(self.idle_sleep_dt)
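
The helper functions estimate_transform_2d and fit_transform_2d used by
find_transform above are defined elsewhere in the package and aren't shown
here. As a rough sketch only (an assumption about the approach, not the
package's actual code), a least-squares 2d rotation + translation between two
point sets can be computed with a 2d Kabsch/Procrustes step:

import numpy

def estimate_transform_2d_sketch(points_0, points_1):
    """
    Least-squares estimate of the 2d rotation angle and translation mapping
    points_0 onto points_1 (both Nx2 numpy arrays). Hypothetical stand-in
    for the package's estimate_transform_2d helper.
    """
    # Center both point sets on their centroids
    centroid_0 = points_0.mean(axis=0)
    centroid_1 = points_1.mean(axis=0)
    centered_0 = points_0 - centroid_0
    centered_1 = points_1 - centroid_1

    # Optimal rotation angle from the cross and dot covariance terms
    num = (centered_0[:, 0]*centered_1[:, 1] -
           centered_0[:, 1]*centered_1[:, 0]).sum()
    den = (centered_0*centered_1).sum()
    rotation = numpy.arctan2(num, den)

    # Translation maps the rotated centroid of set 0 onto set 1
    cos_r, sin_r = numpy.cos(rotation), numpy.sin(rotation)
    rot_matrix = numpy.array([[cos_r, -sin_r], [sin_r, cos_r]])
    translation = centroid_1 - numpy.dot(rot_matrix, centroid_0)
    return rotation, translation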
Ejemplo n.º 12
class BlobFinderNode(object):
    def __init__(self, topic=None):
        self.topic = topic
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = 150
        self.blobFinder.filter_by_area = True
        self.blobFinder.min_area = 0
        self.blobFinder.max_area = 200
        self.topic_type = mct_introspection.get_topic_type(topic)

        rospy.init_node('blob_finder')
        self.ready = False
        if self.topic_type == 'sensor_msgs/Image':
            self.image_sub = rospy.Subscriber(self.topic, Image,
                                              self.image_callback)
        else:
            self.image_sub = rospy.Subscriber(self.topic, SeqAndImage,
                                              self.seq_and_image_callback)
        self.image_pub = rospy.Publisher('image_blobs', Image)
        self.blob_data_pub = rospy.Publisher('blob_data', BlobData)
        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service('{0}/set_param'.format(node_name),
                                           BlobFinderSetParam,
                                           self.handle_set_param_srv)
        self.get_param_srv = rospy.Service('{0}/get_param'.format(node_name),
                                           BlobFinderGetParam,
                                           self.handle_get_param_srv)
        self.ready = True

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters: the threshold
        used for binarizing the image and the blob area filter settings.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True, '')

    def handle_get_param_srv(self, req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def publish_blob_data(self, blobs_list, blobs_image, image_header,
                          image_seq):
        """
        Publish image of blobs and blob data.
        """
        blobs_rosimage = self.bridge.cv_to_imgmsg(blobs_image,
                                                  encoding="passthrough")
        self.image_pub.publish(blobs_rosimage)

        # Create the blob data message and publish
        blob_data_msg = BlobData()
        blob_data_msg.header = image_header
        blob_data_msg.image_seq = image_seq
        blob_data_msg.number_of_blobs = len(blobs_list)
        for b in blobs_list:
            blob_msg = Blob()
            for k, v in b.iteritems():
                setattr(blob_msg, k, v)
            blob_data_msg.blob.append(blob_msg)
        self.blob_data_pub.publish(blob_data_msg)

    def image_callback(self, data):
        """
        Callback for image topic subscription.
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list, blobs_image = self.blobFinder.findBlobs(data)
        self.publish_blob_data(blobs_list, blobs_image, data.header,
                               data.header.seq)

    def seq_and_image_callback(self, data):
        """
        Callback for SeqAndImage topic subscription.
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list, blobs_image = self.blobFinder.findBlobs(data.image)
        self.publish_blob_data(blobs_list, blobs_image, data.image.header,
                               data.seq)

    def run(self):
        rospy.spin()
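
The set_param/get_param services above make the blob finder tunable at
runtime. A minimal client sketch (assuming the node was launched under the
name blob_finder, and that the service types live in the mct_msg_and_srv
package as the topic types used in this file suggest):

import rospy
from mct_msg_and_srv.srv import BlobFinderSetParam  # assumed package path

def set_blob_finder_params(threshold, filter_by_area, min_area, max_area):
    # Service name follows the '{node_name}/set_param' pattern used above
    srv_name = '/blob_finder/set_param'
    rospy.wait_for_service(srv_name)
    proxy = rospy.ServiceProxy(srv_name, BlobFinderSetParam)
    return proxy(threshold, filter_by_area, min_area, max_area)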
Ejemplo n.º 13
class StitchedImageLabeler(object):
    def __init__(self,
                 stitched_image_topic,
                 tracking_pts_topic,
                 max_seq_age=150):
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.stitching_params = file_tools.read_tracking_2d_stitching_params()
        self.stitched_image_topic = stitched_image_topic
        self.tracking_pts_topic = tracking_pts_topic
        self.max_seq_age = max_seq_age
        self.latest_seq = None
        self.seq_to_stitched_image = {}
        self.seq_to_tracking_pts = {}
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.magenta = (255, 255, 0)
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]
        self.labeled_image = None

        self.ready = False
        rospy.init_node('stitched_image_labeler')

        # Subscribe to stitched image and tracking pts topics
        self.image_sub = rospy.Subscriber(self.stitched_image_topic,
                                          SeqAndImage,
                                          self.stitched_image_handler)
        self.tracking_pts_sub = rospy.Subscriber(self.tracking_pts_topic,
                                                 ThreePointTracker,
                                                 self.tracking_pts_handler)

        # Create labeled image publication
        self.labeled_image_pub = rospy.Publisher('image_stitched_labeled',
                                                 Image)

        self.ready = True

    def stitched_image_handler(self, data):
        """
        Callback for the stitched image SeqAndImage topic.
        """

        if not self.ready:
            return

        with self.lock:
            self.latest_seq = data.seq
            self.seq_to_stitched_image[data.seq] = data.image

    def tracking_pts_handler(self, data):
        """
        Callback for handling the tracking point data from the tracker
        synchronizer.
        """

        if not self.ready:
            return

        with self.lock:
            if data.seq % self.stitching_params['frame_skip'] == 0:
                self.latest_seq = data.seq
                self.seq_to_tracking_pts[data.seq] = data

    def create_labeled_image(self, ros_image, tracking_pts):
        """
        Create labeled version of stitched image using the tracking points data
        """

        # Convert stitched image from rosimage to opencv image.
        cv_image = self.bridge.imgmsg_to_cv(ros_image,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)

        # Create color version of stitched image (buffer allocated on first call).
        if self.labeled_image is None:
            self.labeled_image = cv.CreateImage(cv.GetSize(ipl_image),
                                                cv.IPL_DEPTH_8U, 3)
            cv.Zero(self.labeled_image)
        cv.CvtColor(ipl_image, self.labeled_image, cv.CV_GRAY2BGR)

        # Write sequence number on image
        message = '{0}'.format(tracking_pts.seq)
        cv.PutText(self.labeled_image, message, (10, 25), self.cv_text_font,
                   self.magenta)
        if tracking_pts.found:
            # Draw boundary box around tracked object
            p_list = [(int(pt.x), int(pt.y))
                      for pt in tracking_pts.bndry_stitching_plane]
            q_list = p_list[1:] + [p_list[0]]
            for p, q in zip(p_list, q_list):
                cv.Line(self.labeled_image, p, q, self.magenta)
            # Draw circles around tracked object points
            for pt, color in zip(tracking_pts.pts_stitching_plane,
                                 self.tracking_pts_colors):
                cv.Circle(self.labeled_image, (int(pt.x), int(pt.y)), 3, color)

        # Convert labeled image to ros image and publish
        labeled_rosimage = self.bridge.cv_to_imgmsg(self.labeled_image,
                                                    encoding="passthrough")
        self.labeled_image_pub.publish(labeled_rosimage)


    def run(self):
        """
        Node main loop. Pairs up the tracking points data with the stitched
        images by sequence number and passes them to the create_labeled_image
        method. Also cleans out any old data from the seq_to_xxx dictionaries.
        """
        while not rospy.is_shutdown():
            with self.lock:

                # Match up tracking points data with stitched images using sequence number
                for seq, tracking_pts in sorted(
                        self.seq_to_tracking_pts.items()):
                    try:
                        image = self.seq_to_stitched_image[seq]
                        found = True
                    except KeyError:
                        found = False

                    if found:
                        # Remove items from storage dictionaries
                        del self.seq_to_stitched_image[seq]
                        del self.seq_to_tracking_pts[seq]
                        self.create_labeled_image(image, tracking_pts)
                    else:
                        # Remove elements of the seq_to_tracking_pts dict older than the maximum allowed age
                        seq_age = self.latest_seq - seq
                        if seq_age > self.max_seq_age:
                            del self.seq_to_tracking_pts[seq]

                # Remove elements of the seq_to_stitched_image dict older than the maximum allowed age
                for seq in self.seq_to_stitched_image.keys():
                    seq_age = self.latest_seq - seq
                    if seq_age > self.max_seq_age:
                        del self.seq_to_stitched_image[seq]
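
The pairing logic in run() above recurs throughout this file: buffer
incoming data by acquisition sequence number, emit when both halves of a
pair are present, and expire entries older than max_seq_age. The same idiom
stripped of ROS specifics (a sketch with hypothetical names, Python 2
dict semantics assumed):

def match_by_seq(seq_to_a, seq_to_b, latest_seq, max_seq_age):
    """
    Yield (seq, a, b) for sequence numbers present in both buffers,
    removing matched and stale entries in place.
    """
    for seq in sorted(seq_to_a.keys()):
        if seq in seq_to_b:
            yield seq, seq_to_a.pop(seq), seq_to_b.pop(seq)
        elif latest_seq - seq > max_seq_age:
            # Stale entry - assume its partner was lost
            del seq_to_a[seq]
    for seq in seq_to_b.keys():
        if latest_seq - seq > max_seq_age:
            del seq_to_b[seq]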
Ejemplo n.º 14
        if not opt.headless:
            # Display the anaglyph and poll for keyboard input
            cv.ShowImage('stereo-anaglyph', red_blue)
            k = cv.WaitKey(10)
            print k
            if k == escape:
                break
            if k == left:
                anaglyph_cyan_image_distance_correction -= 1
                print anaglyph_cyan_image_distance_correction
            if k == right:
                anaglyph_cyan_image_distance_correction += 1
                print anaglyph_cyan_image_distance_correction
        else:
            rosimage = bridge.cv_to_imgmsg(red_blue, "bgra8")
            image_pub.publish(rosimage)
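
The snippet above is a fragment of a stereo anaglyph viewer; red_blue, the
key codes, and the publisher are created in the elided part of the script.
The composition itself is simple channel mixing. A hedged numpy sketch of
one common red-cyan scheme (left eye in the red channel, right eye in green
and blue; the horizontal offset applied via
anaglyph_cyan_image_distance_correction is omitted here):

import numpy

def make_anaglyph(left_gray, right_gray):
    """
    Combine two equal-size 8-bit grayscale images (numpy arrays, RGB
    channel order assumed) into a red-cyan anaglyph. Sketch only.
    """
    height, width = left_gray.shape
    anaglyph = numpy.zeros((height, width, 3), dtype=numpy.uint8)
    anaglyph[:, :, 0] = left_gray   # red channel from the left eye
    anaglyph[:, :, 1] = right_gray  # green channel from the right eye
    anaglyph[:, :, 2] = right_gray  # blue channel from the right eye
    return anaglyph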

Ejemplo n.º 15
class ImageStitcher(object):
    """
    Subscribes to all rectified image topics in a 2d tracking region and uses
    the calibration data to stitch the images together. In turn it publishes
    image_stitched and image_stitched/seq.

    Note, in order to handle dropped frames gracefully I've modified this node
    so that it can subscribe to the mct_msg_and_srv/SeqAndImage topics
    published by the frame_drop_corrector nodes.
    """

    def __init__(
            self, 
            region, 
            topic_end='image_rect_skip', 
            topic_type='sensor_msgs/Image', 
            max_seq_age=150, 
            max_stamp_age=1.5
            ):

        self.topic_end = topic_end
        self.topic_type = topic_type
        self.max_seq_age = max_seq_age
        self.max_stamp_age = max_stamp_age

        self.warp_flags = cv.CV_INTER_LINEAR 
        self.stitching_params = file_tools.read_tracking_2d_stitching_params()

        rospy.init_node('image_stitcher')
        self.ready = False
        self.is_first_write = True
        self.stitched_image = None
        self.seq_to_images = {}  
        self.stamp_to_seq_pool= {}
        self.image_waiting_pool = {} 

        # Information on the latest sequence received and stitched
        self.seq_newest = None
        self.stamp_newest = None

        self.lock = threading.Lock()
        self.bridge = CvBridge()

        # Get image and camera information 
        regions_dict = file_tools.read_tracking_2d_regions()
        self.region = region
        self.camera_list = regions_dict[region]
        self.create_camera_to_image_dict()
        self.create_camera_to_info_dict()

        # Get transforms from cameras to stitched image and stitched image size
        self.tf2d = transform_2d.Transform2d()
        self.create_tf_data()
        self.get_stitched_image_size()

        # Subscribe to topics based on incoming topic type.
        if self.topic_type == 'sensor_msgs/Image':
            
            # Create pool dictionaries for incoming data.
            for camera in self.camera_list:
                self.stamp_to_seq_pool[camera] = {}
                self.image_waiting_pool[camera] = {}

            # Subscribe to camera info topics
            self.info_sub = {} 
            for camera, topic  in  self.camera_to_info.iteritems():
                info_handler = functools.partial(self.info_handler, camera)
                self.info_sub[camera] = rospy.Subscriber(topic, CameraInfo, info_handler)

            # Subscribe to rectified image topics
            self.image_sub = {} 
            for camera, topic  in  self.camera_to_image.iteritems():
                image_handler = functools.partial(self.image_handler, camera)
                self.image_sub[camera] = rospy.Subscriber(topic, Image, image_handler)

        elif self.topic_type == 'mct_msg_and_srv/SeqAndImage':

            # Subscribe to SeqAndImage topics
            self.seq_and_image_sub = {}
            for camera, topic in self.camera_to_image.iteritems():
                seq_and_image_handler = functools.partial(self.seq_and_image_handler, camera)
                self.seq_and_image_sub[camera] = rospy.Subscriber(topic, SeqAndImage, seq_and_image_handler)

        else:
            err_msg = 'unable to handle topic type: {0}'.format(self.topic_type)
            raise ValueError(err_msg)

        # Stitched image publisher and seq publisher
        self.image_pub = rospy.Publisher('image_stitched', Image)
        self.image_and_seq_pub = rospy.Publisher('seq_and_image_stitched', SeqAndImage)
        self.seq_pub = rospy.Publisher('image_stitched/seq', UInt32)
        self.stamp_pub = rospy.Publisher('image_stitched/stamp_info', StampInfo)
        self.processing_dt_pub = rospy.Publisher('image_stitched/processing_dt', ProcessingInfo)

        # Setup reset service - needs to be called anytime the camera trigger is  
        # stopped - before it is restarted. Empties buffers of images and sequences. 
        self.reset_srv = rospy.Service('reset_image_stitcher', Empty, self.handle_reset_srv)
        self.ready = True

    def handle_reset_srv(self, req):
        """
        Handles the node's reset service - which empties the image and sequence buffers.
        """
        with self.lock:
            self.seq_to_images = {}  
            self.stamp_to_seq_pool= {}
            self.image_waiting_pool = {} 
            self.seq_newest = None
            self.stamp_newest = None
        return EmptyResponse()

    def create_camera_to_image_dict(self):
        """
        Create camera to image topic dictionary
        """
        self.camera_to_image = {}
        image_topics = mct_introspection.find_camera_image_topics(transport=self.topic_end)
        for topic in image_topics:
            topic_split = topic.split('/')
            for camera in self.camera_list:
                if camera in topic_split:
                    self.camera_to_image[camera] = topic

    def create_camera_to_info_dict(self):
        """
        Create camera to camera info topic dictionary
        """
        self.camera_to_info = {}
        info_topics = mct_introspection.find_camera_info_topics()
        for topic in info_topics:
            topic_split = topic.split('/')
            for camera in self.camera_list:
                if camera in topic_split:
                    self.camera_to_info[camera] = topic

    def create_tf_data(self):
        """
        Pre-computes transform matrices and roi for creating the stitched
        images.
        """
        self.tf_data = {}
        for camera in self.camera_list:

            # Get modified transform which maps to ROI in stitched image 
            bbox = self.tf2d.get_stitching_plane_bounding_box(
                    self.region,
                    camera_list=[camera]
                    )
            tf_matrix = self.tf2d.get_camera_to_stitching_plane_tf(camera)
            roi_x = int(math.floor(bbox['min_x']))
            roi_y = int(math.floor(bbox['min_y']))
            roi_w = int(math.floor(bbox['max_x']) - math.floor(bbox['min_x']+5))
            roi_h = int(math.floor(bbox['max_y']) - math.floor(bbox['min_y']+5))
            shift_matrix = numpy.array([
                [1.0, 0.0, -bbox['min_x']],
                [0.0, 1.0, -bbox['min_y']], 
                [0.0, 0.0, 1.0],
                ])
            tf_matrix = numpy.dot(shift_matrix, tf_matrix)
            self.tf_data[camera] = {
                    'matrix': cv.fromarray(tf_matrix), 
                    'roi': (roi_x, roi_y, roi_w, roi_h)
                    }

    def get_stitched_image_size(self):
        """
        Determines the size of the stitched image.
        """
        # Get stitched image size from bounding box
        bbox = self.tf2d.get_stitching_plane_bounding_box(self.region)
        self.image_width = int(math.ceil(bbox['max_x']))
        self.image_height = int(math.ceil(bbox['max_y']))
        self.image_size = (self.image_width, self.image_height)

    def info_handler(self,camera,data):
        """
        Handler for incoming camera info messages. In this callback we place
        the image sequence number into the stamp_to_seq_pool dictionary by
        camera name and timestamp.

        Note, only used when incoming topics are of type sensor_msgs/Image. In
        this case both the Image and camera_info topics are needed to associate
        the acquisition sequence numbers with the images, as the seq numbers in
        the image headers aren't reliable.
        """
        if self.ready:
            with self.lock:
                stamp = data.header.stamp.secs, data.header.stamp.nsecs
                self.stamp_to_seq_pool[camera][stamp] = data.header.seq
                self.update_seq_newest(data.header.seq)
                self.update_stamp_newest(stamp)


    def image_handler(self, camera, data):
        """
        Handler for incoming camera images. In this callback we place the image
        data into the image_waiting_pool by camera name and timestamp. Note, we
        don't want to use the seq information in data.header.seq as this seems
        to be initialized in some random way - probably has to do with python
        not fully supporting ROS's image_transport. The seqs from the
        camera_info topics are correct.

        Note, only used when incoming topics are of type sensor_msgs/Image. In
        this case both the Image and camera_info topics are needed to associate
        the acquisition sequence numbers with the images, as the seq numbers in
        the image headers aren't reliable.
        """
        if self.ready:
            with self.lock:
                stamp = data.header.stamp.secs, data.header.stamp.nsecs
                self.image_waiting_pool[camera][stamp] = data

    def seq_and_image_handler(self,camera,data):
        """
        Handler for incoming SeqAndImage messages which contain both the image
        data and the frame drop corrected acquisition sequence numbers.

        Note, only used when the incoming topics are of type
        mct_msg_and_srv/SeqAndImage.
        """
        if self.ready:
            with self.lock:
                try:
                    self.seq_to_images[data.seq][camera] = data.image
                except KeyError:
                    self.seq_to_images[data.seq] = {camera: data.image}
                self.update_seq_newest(data.seq)

    def update_seq_newest(self,seq): 
        if self.seq_newest is None:
            self.seq_newest = seq
        else:
            self.seq_newest = max([self.seq_newest, seq])

    def update_stamp_newest(self,stamp): 
        if self.stamp_newest is None:
            self.stamp_newest = stamp
        else:
            self.stamp_newest = max([self.stamp_newest, stamp])

    def process_waiting_images(self):
        """ 
        Processes waiting images. Associates images in the waiting pool with
        their acquisition sequence number and places them in the seq_to_images
        buffer.         
        """
        with self.lock:
            # Associate images in the waiting pool with their seq numbers 
            for camera in self.camera_list:
                for stamp, image_data in self.image_waiting_pool[camera].items():

                    try:
                        seq = self.stamp_to_seq_pool[camera][stamp]
                        del self.stamp_to_seq_pool[camera][stamp]
                        del self.image_waiting_pool[camera][stamp]
                        image_data.header.seq = seq 
                        deleted = True
                        try:
                            self.seq_to_images[seq][camera] = image_data
                        except KeyError:
                            self.seq_to_images[seq] = {camera: image_data}
                    except KeyError:
                        deleted = False

                    # Clear out stale data from image waiting pool
                    if not deleted:
                        stamp_age = self.get_stamp_age(stamp)
                        if stamp_age > self.max_stamp_age:
                            del self.image_waiting_pool[camera][stamp]

                # Clear out any stale data from stamp-to-seq pool
                for stamp, seq in self.stamp_to_seq_pool[camera].items():
                    seq_age = self.get_seq_age(seq)
                    if seq_age > self.max_seq_age: 
                        del self.stamp_to_seq_pool[camera][stamp]


    def get_stamp_age(self, stamp):
        """
        Computes the age of a time stamp.
        """
        stamp_secs = stamp_tuple_to_secs(stamp)
        stamp_newest_secs = stamp_tuple_to_secs(self.stamp_newest)
        return stamp_newest_secs - stamp_secs


    def get_seq_age(self, seq):
        """
        Compute sequence age - handling 32-bit uint rollover of the seq
        counter. Note, at 30Hz this shouldn't roll over for something like 4
        years, but you never know.
        """
        age0 = abs(self.seq_newest - seq)
        age1 = MAX_UINT32 - seq + self.seq_newest + 1
        return min([age0, age1])


    def publish_stitched_image(self):
        """
        Checks to see if the sequences to images buffer contains all required frames and if so 
        produces and publishes a stitched image.
        """
        # Check to see if we have all images for a given sequence number
        for seq, image_dict in sorted(self.seq_to_images.items()): 

            # If we have all images for the sequence - stitch into merged view
            if len(image_dict) == len(self.camera_list):

                t0 = rospy.Time.now() 

                for i, camera in enumerate(self.camera_list):

                    # Convert ros image to opencv image
                    ros_image = image_dict[camera] 
                    cv_image = self.bridge.imgmsg_to_cv(
                            ros_image,
                            desired_encoding="passthrough"
                            )
                    ipl_image = cv.GetImage(cv_image)

                    # Create stitched image if it doesn't exist yet
                    if self.stitched_image is None:
                        self.stitched_image = cv.CreateImage(
                                self.image_size,
                                ipl_image.depth,
                                ipl_image.channels
                                )
                    if i == 0:
                        cv.Zero(self.stitched_image)

                    # Set image warping flags - if first fill rest of image with zeros
                    if self.is_first_write:
                        warp_flags = self.warp_flags | cv.CV_WARP_FILL_OUTLIERS
                        self.is_first_write = False
                    else:
                        warp_flags = self.warp_flags

                    # Warp into stitched image 
                    cv.SetImageROI(self.stitched_image, self.tf_data[camera]['roi'])

                    # Warp stitched image
                    cv.WarpPerspective(
                            ipl_image, 
                            self.stitched_image, 
                            self.tf_data[camera]['matrix'],
                            flags=warp_flags,
                            )
                    cv.ResetImageROI(self.stitched_image)

                    # Get max and min time stamps for stamp_info
                    stamp = ros_image.header.stamp
                    if i == 0:
                        stamp_max = stamp 
                        stamp_min = stamp
                    else:
                        stamp_max = stamp if stamp.to_sec() > stamp_max.to_sec() else stamp_max
                        stamp_min = stamp if stamp.to_sec() < stamp_min.to_sec() else stamp_min

                # Convert stitched image to ros image
                stitched_ros_image = self.bridge.cv_to_imgmsg(
                        self.stitched_image,
                        encoding="passthrough"
                        )
                stitched_ros_image.header.seq = seq
                self.image_pub.publish(stitched_ros_image)
                self.image_and_seq_pub.publish(seq, stitched_ros_image)
                self.seq_pub.publish(seq)

                # Compute time stamp spread
                stamp_diff = stamp_max.to_sec() - stamp_min.to_sec()
                self.stamp_pub.publish(stamp_max, stamp_min, stamp_diff)
                
                t1 = rospy.Time.now()
                dt = t1.to_sec() - t0.to_sec()
                self.processing_dt_pub.publish(dt,1.0/dt)

                # Remove data from buffer
                del self.seq_to_images[seq]

            # Throw away any stale data in seq to images buffer
            seq_age = self.get_seq_age(seq)
            if seq_age > self.max_seq_age:
                try:
                    del self.seq_to_images[seq]
                except KeyError:
                    pass

    def run(self):

        while not rospy.is_shutdown():
            if self.seq_newest is None:
                # Nothing received yet - sleep briefly rather than spin
                rospy.sleep(0.005)
                continue
            if self.topic_type == 'sensor_msgs/Image':
                self.process_waiting_images() 
            self.publish_stitched_image()
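
get_seq_age above guards against rollover of the 32-bit acquisition counter
by taking the smaller of the direct distance and the wrapped distance. The
same computation as a standalone function (MAX_UINT32 is defined at module
level in the original; its value is assumed here):

MAX_UINT32 = 2**32 - 1

def seq_age(seq_newest, seq):
    """
    Age of sequence number seq relative to seq_newest, allowing for a
    single wrap of the uint32 counter.
    """
    age_direct = abs(seq_newest - seq)
    age_wrapped = MAX_UINT32 - seq + seq_newest + 1
    return min(age_direct, age_wrapped)

# Example: seq_age(5, MAX_UINT32 - 2) == 8, i.e. the counter wrapped.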
Ejemplo n.º 16
class FrameDropWatchdog(object):
    """
    Frame drop watchdog monitors the number of frames dropped by the system.     
    """
    def __init__(self, max_seq_age=150):

        rospy.init_node('frame_drop_watchdog')
        self.max_seq_age = max_seq_age
        self.lock = threading.Lock()
        self.frames_dropped = {}
        self.latest_seq = None
        self.ready = False

        camera_assignment = file_tools.read_camera_assignment()
        self.number_of_cameras = len(camera_assignment)

        self.bridge = CvBridge()
        self.info_image_size = (400, 90)
        self.font = PILImageFont.truetype(
            "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)

        # Subscribe to camera info topics
        self.frames_dropped_sub = {}
        frames_dropped_topics = self.wait_for_topics()
        for topic in frames_dropped_topics:
            camera = get_camera_from_topic(topic)
            handler = functools.partial(self.frames_dropped_handler, camera)
            self.frames_dropped_sub[camera] = rospy.Subscriber(
                topic, FramesDropped, handler)

        # Setup total frames dropped publisher
        self.total_dropped_pub = rospy.Publisher('total_frames_dropped',
                                                 FramesDropped)

        # Setup reset service
        self.reset_srv = rospy.Service('frame_drop_watchdog_reset', Empty,
                                       self.reset_handler)

        # Create watchdog info image
        self.image_watchdog_pub = rospy.Publisher('image_frame_drop_watchdog',
                                                  Image)

        self.ready = True

    def wait_for_topics(self):
        """
        Wait for the frames_dropped topics to be published.
        """
        while True:
            frames_dropped_topics = mct_introspection.find_topics_w_ending(
                'frames_dropped')
            if len(frames_dropped_topics) == self.number_of_cameras:
                break
            rospy.sleep(0.25)
        return frames_dropped_topics

    def reset_handler(self, req):
        """
        Handler for the node's reset service - empties the frames_dropped buffer.
        """
        with self.lock:
            self.frames_dropped = {}
            self.latest_seq = None
        return EmptyResponse()

    def frames_dropped_handler(self, camera, data):
        if not self.ready:
            return
        with self.lock:
            try:
                self.frames_dropped[data.seq][camera] = data.frames_dropped
            except KeyError:
                self.frames_dropped[data.seq] = {camera: data.frames_dropped}
            self.update_latest_seq(data.seq)

    def update_latest_seq(self, seq):
        if self.latest_seq is None:
            self.latest_seq = seq
        else:
            self.latest_seq = max([seq, self.latest_seq])

    def publish_watchdog_image(self, seq, total_frames_dropped,
                               cameras_w_drops):
        """
        Publish the watchdog info image for the GUI, showing the seq #, the
        total frames dropped, and the cameras with drops.
        """
        pil_info_image = PILImage.new('RGB', self.info_image_size,
                                      (255, 255, 255))
        draw = PILImageDraw.Draw(pil_info_image)
        info_items = [
            ('seq', seq),
            ('dropped', total_frames_dropped),
            ('cameras', cameras_w_drops),
        ]
        text_x, text_y, step_y = 10, 10, 20
        for i, item in enumerate(info_items):
            label, value = item
            label_text = '{0}:'.format(label)
            if isinstance(value, float):
                value_text = '{0:<1.6f}'.format(value)
            else:
                value_text = '{0}'.format(value)
            draw.text((text_x, text_y + step_y * i),
                      label_text,
                      font=self.font,
                      fill=(0, 0, 0))
            draw.text((text_x + 100, text_y + step_y * i),
                      value_text,
                      font=self.font,
                      fill=(0, 0, 0))

        cv_info_image = cv.CreateImageHeader(pil_info_image.size,
                                             cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_info_image, pil_info_image.tostring())

        # Convert to a rosimage and publish
        info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image, 'rgb8')
        self.image_watchdog_pub.publish(info_rosimage)

    def run(self):
        """
        Node main loop. Consolidates the per-camera frame drop counts by
        sequence number, publishes the total, and publishes the watchdog info
        image.
        """
        while not rospy.is_shutdown():
            with self.lock:
                for seq, data in sorted(self.frames_dropped.items()):
                    if len(data) == self.number_of_cameras:
                        total_frames_dropped = sum(data.values())
                        self.total_dropped_pub.publish(seq,
                                                       total_frames_dropped)
                        cameras_w_drops = [
                            c for c, n in data.iteritems() if n > 0
                        ]
                        cameras_w_drops = [
                            int(c.split('_')[1]) for c in cameras_w_drops
                        ]
                        del self.frames_dropped[seq]
                        self.publish_watchdog_image(seq, total_frames_dropped,
                                                    cameras_w_drops)
                    else:
                        if self.latest_seq - seq > self.max_seq_age:
                            del self.frames_dropped[seq]
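
The watchdog above and the tracker synchronizer below render their status
images the same way: draw text into a PIL image, wrap the raw bytes in an
OpenCV image header, then hand the result to cv_bridge. The conversion path
condensed into one helper (a sketch using the legacy cv API; the PIL import
style matching the PILImage name used in this file is assumed):

import cv
import Image as PILImage  # assumed import style for the PILImage name

def pil_to_rosimage(bridge, pil_image):
    """
    Convert an RGB PIL image to a sensor_msgs/Image. bridge is a
    cv_bridge.CvBridge instance.
    """
    cv_image = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_image, pil_image.tostring())
    return bridge.cv_to_imgmsg(cv_image, 'rgb8')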
Ejemplo n.º 18
class ThreePointTracker_Synchronizer:
    """
    Synchronization node for all of the three point trackers in a given tracking region.
    """
    def __init__(self, region, max_seq_age=200):
        self.lock = threading.Lock()

        self.region = region
        regions_dict = file_tools.read_tracking_2d_regions()
        self.camera_list = regions_dict[region]
        self.camera_list.sort()
        self.create_camera_to_tracking_dict()
        self.latest_seq = None
        self.max_seq_age = max_seq_age
        self.tracking_pts_pool = {}

        self.tracking_pts_roi_size = rospy.get_param(
            '/three_point_tracker_params/roi_size', (150, 150))

        # Color and font for tracking points image
        self.magenta = (255, 255, 0)
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.6,
                                        0.6,
                                        thickness=0)

        # Font for PIL tracking info image
        self.info_image_size = (180, 100)
        self.font = PILImageFont.truetype(
            "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)

        # Get transforms from cameras to tracking and stitched image planes
        self.tf2d = transform_2d.Transform2d()

        self.bridge = CvBridge()
        self.ready = False
        rospy.init_node('three_point_tracker_synchronizer',
                        log_level=rospy.DEBUG)

        # Subscribe to raw tracking pts topics
        self.tracking_pts_sub = {}
        for camera, topic in self.camera_to_tracking.iteritems():
            handler = functools.partial(self.tracking_pts_handler, camera)
            self.tracking_pts_sub[camera] = rospy.Subscriber(
                topic, ThreePointTrackerRaw, handler)

        # Create publishers
        self.tracking_pts_pub = rospy.Publisher('tracking_pts',
                                                ThreePointTracker)
        self.image_tracking_pts = None
        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)
        self.image_tracking_info_pub = rospy.Publisher('image_tracking_info',
                                                       Image)

        # Setup rand sync service
        rospy.wait_for_service('/get_rand_sync_signal')
        self.rand_sync_srv = rospy.ServiceProxy('/get_rand_sync_signal',
                                                GetRandSyncSignal)

        # Setup reset service - needs to be called anytime the camera trigger is
        # stopped - before it is restarted. Empties buffers of images and sequences.
        self.reset_srv = rospy.Service('reset_tracker_synchronizer', Empty,
                                       self.reset_handler)
        self.ready = True

    def create_camera_to_tracking_dict(self):
        """
        Creates a dictionary relating the cameras in the tracking region to their
        corresponding three point tracker nodes.
        """
        self.camera_to_tracking = {}
        for camera in self.camera_list:
            camera_fullpath_topic = mct_introspection.get_camera_fullpath_topic(
                camera)
            tracking_pts_topic = '{0}/tracking_pts'.format(
                camera_fullpath_topic)
            self.camera_to_tracking[camera] = tracking_pts_topic

    def reset_handler(self, req):
        """
        Reset service handler. Empties the tracking_pts_pool.
        """
        with self.lock:
            self.latest_seq = None
            self.tracking_pts_pool = {}
        return EmptyResponse()

    def tracking_pts_handler(self, camera, msg):
        """
        Handler for messages from the individual tracker nodes. Sticks the tracking
        point data into a dictionary by sequence number and camera.
        """
        if not self.ready:
            return
        with self.lock:
            self.latest_seq = msg.data.seq
            try:
                self.tracking_pts_pool[msg.data.seq][camera] = msg
            except KeyError:
                self.tracking_pts_pool[msg.data.seq] = {camera: msg}

    def process_tracking_pts(self, tracking_pts_dict):
        """
        Determines whether the object has been found in any of the three point
        trackers for the region. If it has been found then the best tracking
        point data is selected in a winner-takes-all fashion: the message with
        the largest ROI wins, with ties broken by distance to the center of
        the image of the camera on which the points were captured.
        """

        # Get time stamp (secs, nsecs) - always from the same camera to avoid jumps due to
        # possible system clock differences.
        time_camera = self.camera_list[0]
        time_camera_secs = tracking_pts_dict[time_camera].data.secs
        time_camera_nsecs = tracking_pts_dict[time_camera].data.nsecs

        # Get list of messages in which the object was found
        found_list = [
            msg for msg in tracking_pts_dict.values() if msg.data.found
        ]
        tracking_pts_msg = ThreePointTracker()

        if found_list:

            # Object found - select the object with the largest ROI or, if the
            # ROIs are of equal size, the object whose distance to the center
            # of the image is smallest
            found_list.sort(cmp=tracking_pts_sort_cmp)
            best = found_list[0]
            camera = best.data.camera

            # Get coordinates of points in tracking and stitching planes
            best_points_array = numpy.array([(p.x, p.y)
                                             for p in best.data.points])
            pts_anchor_plane = self.tf2d.camera_pts_to_anchor_plane(
                camera, best_points_array)
            pts_stitching_plane = self.tf2d.camera_pts_to_stitching_plane(
                camera, best_points_array)
            pts_anchor_plane = [
                Point2d(p[0], p[1]) for p in list(pts_anchor_plane)
            ]
            pts_stitching_plane = [
                Point2d(p[0], p[1]) for p in list(pts_stitching_plane)
            ]

            # Get orientation angle and mid point of object in anchor and stitching planes
            angle = get_angle(pts_anchor_plane)
            midpt_anchor_plane = get_midpoint(pts_anchor_plane)
            midpt_stitching_plane = get_midpoint(pts_stitching_plane)

            # Get size of tracking points image in the anchor (tracking) plane
            roi = best.data.roi
            x0, x1 = roi[0], roi[0] + roi[2]
            y0, y1 = roi[1], roi[1] + roi[3]
            bndry_camera = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
            bndry_camera_array = numpy.array(bndry_camera)
            bndry_anchor = self.tf2d.camera_pts_to_anchor_plane(
                camera, bndry_camera_array)
            bndry_stitching = self.tf2d.camera_pts_to_stitching_plane(
                camera, bndry_camera_array)
            bndry_anchor = [tuple(x) for x in list(bndry_anchor)]
            bndry_stitching = [tuple(x) for x in list(bndry_stitching)]
            dx1 = abs(bndry_anchor[1][0] - bndry_anchor[0][0])
            dx2 = abs(bndry_anchor[3][0] - bndry_anchor[2][0])
            dy1 = abs(bndry_anchor[2][1] - bndry_anchor[1][1])
            dy2 = abs(bndry_anchor[3][1] - bndry_anchor[0][1])
            dx_max = max([dx1, dx2])
            dy_max = max([dy1, dy2])
            dim_max = max([dx_max, dy_max])

            # Convert tracking points image to opencv image.
            image_tracking_pts = self.bridge.imgmsg_to_cv(
                best.image, desired_encoding="passthrough")
            image_tracking_pts = cv.GetImage(image_tracking_pts)
            image_size = cv.GetSize(image_tracking_pts)
            image_dim_max = max(image_size)

            # Get matrix for homography from camera to anchor plane
            tf_matrix = self.tf2d.get_camera_to_anchor_plane_tf(camera)

            # Shift for local ROI
            tf_shift = numpy.array([
                [1.0, 0.0, roi[0]],
                [0.0, 1.0, roi[1]],
                [0.0, 0.0, 1.0],
            ])
            tf_matrix = numpy.dot(tf_matrix, tf_shift)

            # Get scaling factor
            shift_x = -min([x for x, y in bndry_anchor])
            shift_y = -min([y for x, y in bndry_anchor])
            scale_factor = float(image_dim_max) / dim_max

            # Scale and shift transform so that the homography maps the
            # tracking points sub-image into an image_size image starting at
            # coordinate (0, 0)
            tf_shift_and_scale = numpy.array([
                [scale_factor, 0.0, scale_factor * shift_x],
                [0.0, scale_factor, scale_factor * shift_y],
                [0.0, 0.0, 1.0],
            ])
            tf_matrix = numpy.dot(tf_shift_and_scale, tf_matrix)

            # Warp image using homography
            image_tracking_pts_mod = cv.CreateImage(
                image_size, image_tracking_pts.depth,
                image_tracking_pts.channels)
            cv.WarpPerspective(
                image_tracking_pts,
                image_tracking_pts_mod,
                cv.fromarray(tf_matrix),
                cv.CV_INTER_LINEAR | cv.CV_WARP_FILL_OUTLIERS,
            )

            # Add sequence number to image
            message = '{0}'.format(best.data.seq)
            cv.PutText(image_tracking_pts_mod, message, (2, 15),
                       self.cv_text_font, self.magenta)
            self.image_tracking_pts = self.bridge.cv_to_imgmsg(
                image_tracking_pts_mod, encoding="passthrough")

            # Create tracking points message
            tracking_pts_msg.seq = best.data.seq
            tracking_pts_msg.secs = time_camera_secs
            tracking_pts_msg.nsecs = time_camera_nsecs
            tracking_pts_msg.camera = camera
            tracking_pts_msg.found = True
            tracking_pts_msg.angle = angle
            tracking_pts_msg.angle_deg = (180.0 * angle) / math.pi
            tracking_pts_msg.midpt_anchor_plane = midpt_anchor_plane
            tracking_pts_msg.midpt_stitching_plane = midpt_stitching_plane
            tracking_pts_msg.pts_anchor_plane = pts_anchor_plane
            tracking_pts_msg.pts_stitching_plane = pts_stitching_plane
            tracking_pts_msg.bndry_anchor_plane = [
                Point2d(x, y) for x, y in bndry_anchor
            ]
            tracking_pts_msg.bndry_stitching_plane = [
                Point2d(x, y) for x, y in bndry_stitching
            ]
        else:
            sample = tracking_pts_dict.values()[0]
            tracking_pts_msg.seq = sample.data.seq
            tracking_pts_msg.secs = time_camera_secs
            tracking_pts_msg.nsecs = time_camera_nsecs
            tracking_pts_msg.camera = ''
            tracking_pts_msg.found = False

        # Create tracking info image
        pil_info_image = PILImage.new('RGB', self.info_image_size,
                                      (255, 255, 255))
        draw = PILImageDraw.Draw(pil_info_image)
        text_list = []

        label = 'Found:'
        value = '{0}'.format(tracking_pts_msg.found)
        unit = ''
        text_list.append((label, value, unit))

        label = 'Angle:'
        value = '{0:+3.1f}'.format(180.0 * tracking_pts_msg.angle / math.pi)
        unit = 'deg'
        text_list.append((label, value, unit))

        label = 'Pos X:'
        value = '{0:+1.3f}'.format(tracking_pts_msg.midpt_anchor_plane.x)
        unit = 'm'
        text_list.append((label, value, unit))

        label = 'Pos Y:'
        value = '{0:+1.3f}'.format(tracking_pts_msg.midpt_anchor_plane.y)
        unit = 'm'
        text_list.append((label, value, unit))

        for i, text in enumerate(text_list):
            label, value, unit = text
            draw.text((10, 10 + 20 * i), label, font=self.font, fill=(0, 0, 0))
            draw.text((70, 10 + 20 * i), value, font=self.font, fill=(0, 0, 0))
            draw.text((125, 10 + 20 * i), unit, font=self.font, fill=(0, 0, 0))
        cv_info_image = cv.CreateImageHeader(pil_info_image.size,
                                             cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_info_image, pil_info_image.tostring())

        # Convert to a rosimage and publish
        info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image, 'rgb8')
        self.image_tracking_info_pub.publish(info_rosimage)

        # Get sync signal
        sync_rsp = self.rand_sync_srv(tracking_pts_msg.seq)
        if sync_rsp.status:
            tracking_pts_msg.sync_signal = sync_rsp.signal
        else:
            tracking_pts_msg.sync_signal = [0, 0, 0]

        # Publish messages
        self.tracking_pts_pub.publish(tracking_pts_msg)
        if self.image_tracking_pts is not None:
            self.image_tracking_pts_pub.publish(self.image_tracking_pts)
        else:
            zero_image_cv = cv.CreateImage(self.tracking_pts_roi_size,
                                           cv.IPL_DEPTH_8U, 3)
            cv.Zero(zero_image_cv)
            zero_image_ros = self.bridge.cv_to_imgmsg(zero_image_cv,
                                                      encoding="passthrough")
            self.image_tracking_pts_pub.publish(zero_image_ros)

    def run(self):
        """
        Node main loop - consolidates the tracking points messages by
        acquisition sequence number and processes them by passing them to the
        process_tracking_pts method.
        """
        while not rospy.is_shutdown():

            with self.lock:

                for seq, tracking_pts_dict in sorted(
                        self.tracking_pts_pool.items()):

                    # Check if we have all the tracking data for the given sequence number
                    if len(tracking_pts_dict) == len(self.camera_list):
                        self.process_tracking_pts(tracking_pts_dict)
                        del self.tracking_pts_pool[seq]

                    # Throw away tracking data greater than the maximum allowed age
                    if self.latest_seq - seq > self.max_seq_age:
                        del self.tracking_pts_pool[seq]
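
The comparison function tracking_pts_sort_cmp used in process_tracking_pts
is defined elsewhere in the package. Per the comments above, it prefers the
message with the larger ROI and breaks ties by distance to the image center.
A hypothetical Python 2 cmp function implementing that ordering (the
distance_to_image_center field is an assumption; roi = (x, y, w, h) is the
layout used above):

def tracking_pts_sort_cmp_sketch(msg_x, msg_y):
    """
    Order tracking point messages best-first: larger ROI wins, equal ROIs
    fall back to smaller distance to the image center.
    """
    area_x = msg_x.data.roi[2] * msg_x.data.roi[3]
    area_y = msg_y.data.roi[2] * msg_y.data.roi[3]
    if area_x != area_y:
        return -cmp(area_x, area_y)  # bigger ROI sorts first
    return cmp(msg_x.data.distance_to_image_center,
               msg_y.data.distance_to_image_center)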
Ejemplo n.º 19
class HomographyCalibratorNode(object):
    def __init__(self, topic):

        self.ready = False
        self.state = WAITING  # There are 3 allowed states WAITING, WORKING, FINISHED
        self.topic = topic
        self.bridge = CvBridge()
        self.lock = threading.Lock()

        rospy.init_node('homography_calibrator')
        self.node_name = rospy.get_name()

        # Initialize data lists
        self.blobs_list = []
        self.image_points = []
        self.world_points = []

        # Set active target information and turn off leds
        target_info = mct_active_target.active_target_info()
        self.led_n_max = target_info[0]
        self.led_m_max = target_info[1]
        self.number_of_leds = self.led_n_max * self.led_m_max
        self.led_max_power = target_info[2]
        self.led_space_x = target_info[3]
        self.led_space_y = target_info[3]
        mct_active_target.off()

        # Current led indices
        self.led_n = 0
        self.led_m = 0

        # Led power setting
        params_namespace = '/homography_calibrator_params'
        self.led_power = rospy.get_param(
            '{0}/target/led_power'.format(params_namespace), 10)

        # Wait count for image acquisition
        self.image_wait_number = rospy.get_param(
            '{0}/image_wait_number'.format(params_namespace), 4)
        self.image_wait_cnt = 0

        # Sleep periods for idle and wait count loops
        self.idle_sleep_dt = 0.25
        self.wait_sleep_dt = 0.005

        # Initialize blob finder
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = rospy.get_param(
            '{0}/blob_finder/threshold'.format(params_namespace), 200)
        self.blobFinder.filter_by_area = rospy.get_param(
            '{0}/blob_finder/filter_by_area'.format(params_namespace), False)
        self.blobFinder.min_area = rospy.get_param(
            '{0}/blob_finder/min_area'.format(params_namespace), 0)
        self.blobFinder.max_area = rospy.get_param(
            '{0}/blob_finder/max_area'.format(params_namespace), 200)

        # Initialize homography matrix and number of points required to solve for it
        self.homography_matrix = None
        self.num_points_required = rospy.get_param(
            '{0}/num_points_required'.format(params_namespace), 10)

        # Set font and initial image information
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.image_info = 'no data'

        # Subscription to image topic
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)

        # Publications - blobs image and calibration progress image
        self.image_blobs_pub = rospy.Publisher('image_homography_blobs', Image)
        self.image_calib_pub = rospy.Publisher('image_homography_calibration',
                                               Image)

        # Services
        self.start_srv = rospy.Service('{0}/start'.format(self.node_name),
                                       GetFlagAndMessage,
                                       self.handle_start_srv)

        self.get_matrix_srv = rospy.Service(
            '{0}/get_matrix'.format(self.node_name), GetMatrix,
            self.handle_get_matrix_srv)

        self.is_calibrated_srv = rospy.Service(
            '{0}/is_calibrated'.format(self.node_name), GetBool,
            self.handle_is_calibrated_srv)
        self.ready = True

    def handle_start_srv(self, req):
        """
        Handles requests to start/restart the homography calibration procedure.
        """
        flag = True
        message = ''
        with self.lock:
            flag, message = mct_active_target.lock(self.node_name)
            if flag:
                self.homography_matrix = None
                self.image_info = ''
                self.image_points = []
                self.world_points = []
                self.blobs_list = []
                self.state = WORKING
                self.led_n = 0
                self.led_m = 0
        return GetFlagAndMessageResponse(flag, message)

    def handle_get_matrix_srv(self, req):
        """
        Returns the homography matrix. If the homography matrix has not yet
        been computed the identity matrix is returned.
        """
        if self.homography_matrix is None:
            data = [1, 0, 0, 0, 1, 0, 0, 0, 1]
        else:
            data = self.homography_matrix.reshape((9, ))
        return GetMatrixResponse(3, 3, data)

    def handle_is_calibrated_srv(self, req):
        """
        Handles requests for whether or not the homography calibration has been completed.
        """
        value = self.homography_matrix is not None
        return GetBoolResponse(value)

    def image_callback(self, data):
        """
        Image topic callback function. Uses the blobFinder to find the blobs
        (leds) in the image. Publishes images showing the blobs and the
        calibration progress.
        """
        # Find blobs
        with self.lock:

            if not self.ready:
                return

            if self.state == WORKING:
                self.blobs_list, blobs_image = self.blobFinder.findBlobs(data)
                blobs_rosimage = self.bridge.cv_to_imgmsg(
                    blobs_image, encoding="passthrough")
            else:
                self.blobs_list = []
                blobs_rosimage = data

            if self.image_wait_cnt < self.image_wait_number:
                self.image_wait_cnt += 1

        self.image_blobs_pub.publish(blobs_rosimage)

        # Create calibration data  image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        calib_image = cv.CreateImage(cv.GetSize(ipl_image), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, calib_image, cv.CV_GRAY2BGR)

        # Add calibration markers to image
        color = STATE_COLOR[self.state]
        for x, y in self.image_points:
            cv.Circle(calib_image, (int(x), int(y)), 3, color)

        # Add text to image
        message = [STATE_MESSAGE[self.state]]

        if self.state == WORKING or self.state == FINISHED:
            message.append('{0}/{1} pts'.format(len(self.image_points),
                                                self.get_led_count()))
        if self.image_info:
            message.append('- {0}'.format(self.image_info))
        message = ' '.join(message)
        cv.PutText(calib_image, message, (10, 25), self.cv_text_font, color)

        # Publish calibration progress image
        calib_rosimage = self.bridge.cv_to_imgmsg(calib_image,
                                                  encoding="passthrough")
        self.image_calib_pub.publish(calib_rosimage)

    def wait_for_images(self):
        """
        Wait for 'image_wait_number' of images to be acquired.
        """
        with self.lock:
            self.image_wait_cnt = 0
        done = False
        while not done:
            with self.lock:
                image_wait_cnt = self.image_wait_cnt
            if image_wait_cnt >= self.image_wait_number:
                done = True
            rospy.sleep(self.wait_sleep_dt)

    def increment_led(self):
        """
        Increments the led array indices. When the last led is reached, check
        whether sufficient points have been captured for the homography
        calibration. If so, the state is changed to FINISHED. If insufficient
        points have been gathered the state is changed back to WAITING.
        """

        if self.led_m < self.led_m_max - 1 or self.led_n < self.led_n_max - 1:
            if self.led_n < self.led_n_max - 1:
                self.led_n += 1
            else:
                self.led_n = 0
                self.led_m += 1
        else:

            # Turn off led and unlock active target
            mct_active_target.off()
            mct_active_target.unlock(self.node_name)

            # Check that enough points were captured; otherwise return to WAITING.
            if len(self.image_points) >= self.num_points_required:
                self.state = FINISHED
                self.image_info = ''
            else:
                self.state = WAITING
                self.image_info = 'not enough data'
                self.homography_matrix = None

    def get_led_count(self):
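        # Flattens the (led_m, led_n) grid indices in row-major order.  For
        # example, with led_n_max = 4 (a hypothetical grid width), indices
        # (led_m, led_n) = (1, 2) give 2 + 1 * 4 + 1 = 7, i.e. the seventh led.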
        return self.led_n + self.led_m * self.led_n_max + 1

    def run(self):
        """
        Node main function. When the state is WORKING this function activates the leds on the
        active calibration target one at a time. When all of the leds have been activated and
        if sufficient number of points have been collected the homography matrix is calculated.
        """

        while not rospy.is_shutdown():

            if self.state == WORKING:

                # Turn on current led and wait for images
                mct_active_target.set_led(self.led_n, self.led_m,
                                          self.led_power)
                self.wait_for_images()

                if len(self.blobs_list) == 1:

                    # Get centroid of blob and add to image point list
                    blob = self.blobs_list[0]
                    image_x = blob['centroid_x']
                    image_y = blob['centroid_y']
                    self.image_points.append((image_x, image_y))

                    # Compute coordinates of the led in the world plane
                    world_x = self.led_n * self.led_space_x
                    world_y = self.led_m * self.led_space_y
                    self.world_points.append((world_x, world_y))

                self.increment_led()

            elif self.state == FINISHED and self.homography_matrix is None:

                # Find the homography transformation
                image_points = numpy.array(self.image_points)
                world_points = numpy.array(self.world_points)
                result = cv2.findHomography(image_points, world_points,
                                            cv2.RANSAC)
                self.homography_matrix, mask = result

                # Compute the mean reprojection error
                image_points_hg = numpy.ones((image_points.shape[0], 3))
                image_points_hg[:, :2] = image_points
                world_points_hg = numpy.ones((world_points.shape[0], 3))
                world_points_hg[:, :2] = world_points
                homography_matrix_t = self.homography_matrix.transpose()
                world_points_hg_pred = numpy.dot(image_points_hg,
                                                 homography_matrix_t)
                denom = numpy.zeros((world_points.shape[0], 2))
                denom[:, 0] = world_points_hg_pred[:, 2]
                denom[:, 1] = world_points_hg_pred[:, 2]
                world_points_pred = world_points_hg_pred[:, :2] / denom
                error = (world_points - world_points_pred)**2
                error = error.sum(axis=1)
                error = numpy.sqrt(error)
                error = error.mean()
                self.image_info = 'error {0:1.2f} mm'.format(1e3 * error)
            else:
                rospy.sleep(self.idle_sleep_dt)
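
The reprojection-error computation above can be exercised in isolation. Below
is a minimal, self-contained sketch (the point lists are made-up test data,
not values from the node; only numpy and cv2 are assumed):

import numpy
import cv2

# Hypothetical test data: image points in pixels and world points in meters,
# related by a 0.001 m/pixel scale plus a little noise.
image_points = numpy.array([[10.0, 10.0], [110.0, 12.0],
                            [12.0, 110.0], [108.0, 108.0]])
world_points = 0.001 * image_points + 0.0001 * numpy.random.randn(4, 2)

# Estimate the homography with RANSAC, as in the node above.
H, mask = cv2.findHomography(image_points, world_points, cv2.RANSAC)

# Mean reprojection error: map the image points through H in homogeneous
# coordinates, normalize by the third component, compare to the world points.
pts_hg = numpy.ones((image_points.shape[0], 3))
pts_hg[:, :2] = image_points
pred_hg = numpy.dot(pts_hg, H.transpose())
pred = pred_hg[:, :2] / pred_hg[:, 2:3]
error = numpy.sqrt(((world_points - pred)**2).sum(axis=1)).mean()
print('mean reprojection error: {0:1.3f} mm'.format(1e3 * error))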
Example #20
class ThreePointTracker(object):
    """
    Three point object tracker. Looks for objects in the image stream with
    three bright unequally spaced points.
    """
    def __init__(self, topic=None, max_stamp_age=1.5):

        self.lock = threading.Lock()
        self.ready = False
        self.topic = topic
        self.tracking_pts_roi = None
        self.tracking_pts_roi_src = None
        self.tracking_pts_roi_dst = None
        self.seq_to_data = {}

        self.topic_type = mct_introspection.get_topic_type(self.topic)
        self.bridge = CvBridge()
        self.camera = get_camera_from_topic(self.topic)
        self.camera_info_topic = get_camera_info_from_image_topic(self.topic)
        rospy.init_node('blob_finder')

        # Get parameters from the parameter server
        params_ns = 'three_point_tracker_params'
        self.blobFinder = BlobFinder()
        self.tracking_pts_roi_size = rospy.get_param(
            '/{0}/roi_size'.format(params_ns),
            (200, 200),
        )
        self.blobFinder.threshold = rospy.get_param(
            '/{0}/threshold'.format(params_ns),
            200,
        )
        self.blobFinder.filter_by_area = rospy.get_param(
            '/{0}/filter_by_area'.format(params_ns), False)
        self.blobFinder.min_area = rospy.get_param(
            '/{0}/min_area'.format(params_ns),
            0,
        )
        self.blobFinder.max_area = rospy.get_param(
            '/{0}/max_area'.format(params_ns),
            200,
        )
        self.tracking_pts_spacing = rospy.get_param(
            '/{0}/pts_spacing'.format(params_ns),
            (0.0, 0.04774, 0.07019),
        )
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]

        # Determine target point spacing
        d0 = self.tracking_pts_spacing[1] - self.tracking_pts_spacing[0]
        d1 = self.tracking_pts_spacing[2] - self.tracking_pts_spacing[1]
        if d0 > d1:
            self.large_step_first = True
        else:
            self.large_step_first = False
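        # With the default pts_spacing (0.0, 0.04774, 0.07019), d0 = 0.04774
        # and d1 = 0.02245, so d0 > d1 and the large gap comes first.  This
        # asymmetry is what lets get_sorted_uv_points orient the three points.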

        # Set up subscribers based on topic type.
        if self.topic_type == 'sensor_msgs/Image':

            # Data dictionaries for synchronizing tracking data with image seq numbers
            self.max_stamp_age = max_stamp_age
            self.latest_stamp = None
            self.stamp_to_seq = {}
            self.stamp_to_data = {}

            # Subscribe to image and camera info topics
            self.image_sub = rospy.Subscriber(self.topic, Image,
                                              self.image_callback)
            self.info_sub = rospy.Subscriber(self.camera_info_topic,
                                             CameraInfo,
                                             self.camera_info_callback)

        elif self.topic_type == 'mct_msg_and_srv/SeqAndImage':

            self.seq_and_image_sub = rospy.Subscriber(
                self.topic, SeqAndImage, self.seq_and_image_callback)

        else:
            err_msg = 'unable to handle topic type: {0}'.format(
                self.topic_type)
            raise ValueError(err_msg)

        # Create tracking point and tracking points image publications
        self.tracking_pts_pub = rospy.Publisher('tracking_pts',
                                                ThreePointTrackerRaw)
        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)

        # Set up services for getting and setting the tracking parameters.
        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service('{0}/set_param'.format(node_name),
                                           BlobFinderSetParam,
                                           self.handle_set_param_srv)
        self.get_param_srv = rospy.Service('{0}/get_param'.format(node_name),
                                           BlobFinderGetParam,
                                           self.handle_get_param_srv)
        self.ready = True
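        # A minimal usage sketch (the topic name is hypothetical):
        #
        #   tracker = ThreePointTracker(topic='/camera_1/image_raw')
        #   tracker.run()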

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters. Currently this
        is just the threshold used for binarizing the image.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True, '')

    def handle_get_param_srv(self, req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def camera_info_callback(self, camera_info):
        """
        Callback for camera info topic subscription - used to get the image
        seq number when subscribing to sensor_msgs/Image topics, as the seq
        numbers in the Image headers are not reliable.

        Note, only used when subscription is of type sensor_msgs/Image.
        """
        stamp_tuple = camera_info.header.stamp.secs, camera_info.header.stamp.nsecs
        with self.lock:
            self.latest_stamp = stamp_tuple
            self.stamp_to_seq[stamp_tuple] = camera_info.header.seq

    def image_callback(self, image):
        """
        Callback for image topic subscription - gets images and finds the
        tracking points.

        Note, only used when subscription is of type sensor_msgs/Image. 
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list = self.blobFinder.findBlobs(image, create_image=False)
        tracking_data = self.get_tracking_data(blobs_list, image)
        with self.lock:
            self.stamp_to_data[tracking_data['stamp']] = tracking_data

    def seq_and_image_callback(self, msg_data):
        """
        Callback for subscriptions of type mct_msg_and_srv/SeqAndImage type.
        Finds blobs in msg_data.image and extracts tracking data.
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list = self.blobFinder.findBlobs(msg_data.image,
                                                   create_image=False)
        tracking_data = self.get_tracking_data(blobs_list, msg_data.image)
        with self.lock:
            self.seq_to_data[msg_data.seq] = tracking_data

    def get_tracking_data(self, blobs_list, rosimage):
        """
        Gets tracking data from list of blobs and raw image.
        """
        # Convert to opencv image
        cv_image = self.bridge.imgmsg_to_cv(rosimage,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)

        # Create tracking points image
        image_tracking_pts = cv.CreateImage(self.tracking_pts_roi_size,
                                            cv.IPL_DEPTH_8U, 3)
        cv.Zero(image_tracking_pts)

        num_blobs = len(blobs_list)
        if num_blobs == 3:
            found = True
            uv_list = self.get_sorted_uv_points(blobs_list)
            self.tracking_pts_roi = self.get_tracking_pts_roi(
                uv_list, cv.GetSize(ipl_image))
            dist_to_image_center = self.get_dist_to_image_center(
                uv_list, cv.GetSize(ipl_image))
        else:
            found = False
            dist_to_image_center = 0.0
            uv_list = [(0, 0), (0, 0), (0, 0)]

        # Create tracking pts image using ROI around tracking points
        if self.tracking_pts_roi is not None:
            src_roi, dst_roi = truncate_roi(self.tracking_pts_roi,
                                            cv.GetSize(ipl_image))
            cv.SetImageROI(ipl_image, src_roi)
            cv.SetImageROI(image_tracking_pts, dst_roi)
            cv.CvtColor(ipl_image, image_tracking_pts, cv.CV_GRAY2BGR)
            cv.ResetImageROI(ipl_image)
            cv.ResetImageROI(image_tracking_pts)
            self.tracking_pts_roi_src = src_roi
            self.tracking_pts_roi_dst = dst_roi
            if found:
                for i, uv in enumerate(uv_list):
                    color = self.tracking_pts_colors[i]
                    u, v = uv
                    u = u - self.tracking_pts_roi[0]
                    v = v - self.tracking_pts_roi[1]
                    cv.Circle(image_tracking_pts, (int(u), int(v)), 3, color)

        # Convert tracking points image to rosimage and publish
        rosimage_tracking_pts = self.bridge.cv_to_imgmsg(
            image_tracking_pts, encoding="passthrough")
        self.image_tracking_pts_pub.publish(rosimage_tracking_pts)

        # If tracking points roi doesn't exist yet just send zeros
        if self.tracking_pts_roi is None:
            tracking_pts_roi = (0, 0, 0, 0)
            tracking_pts_roi_src = (0, 0, 0, 0)
            tracking_pts_roi_dst = (0, 0, 0, 0)
        else:
            tracking_pts_roi = self.tracking_pts_roi
            tracking_pts_roi_src = self.tracking_pts_roi_src
            tracking_pts_roi_dst = self.tracking_pts_roi_dst

        stamp_tuple = rosimage.header.stamp.secs, rosimage.header.stamp.nsecs

        tracking_data = {
            'found': found,
            'stamp': stamp_tuple,
            'tracking_pts': uv_list,
            'image_tracking_pts': rosimage_tracking_pts,
            'dist_to_image_center': dist_to_image_center,
            'tracking_pts_roi': tracking_pts_roi,
            'tracking_pts_roi_src': tracking_pts_roi_src,
            'tracking_pts_roi_dst': tracking_pts_roi_dst,
        }
        return tracking_data

    def get_dist_to_image_center(self, uv_list, img_size):
        """
        Computes the distance from the mid point of the tracking points to the
        middle of the image.
        """
        img_mid = 0.5 * img_size[0], 0.5 * img_size[1]
        pts_mid = get_midpoint_uv_list(uv_list)
        return distance_2d(pts_mid, img_mid)

    def get_tracking_pts_roi(self, uv_list, img_size):
        """
        Gets the coordinates of the region of interest about the tracking
        points as an (x, y, width, height) tuple.
        """
        roi_width, roi_height = self.tracking_pts_roi_size
        u_mid, v_mid = get_midpoint_uv_list(uv_list)
        u_min = int(math.floor(u_mid - roi_width / 2))
        v_min = int(math.floor(v_mid - roi_height / 2))
        return u_min, v_min, roi_width, roi_height
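        # Example (hypothetical numbers): with roi_size (200, 200) and a
        # midpoint at (320.0, 240.0), the ROI is (220, 140, 200, 200), an
        # (x, y, width, height) tuple in pixel coordinates.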

    def get_sorted_uv_points(self, blobs_list):
        """
        For blob lists with three blobs finds the identities of the blobs based on
        colinearity and distance between the points.
        """
        assert len(blobs_list) == 3, 'blobs list must contain only thre blobs'
        # Get x and y point lists
        u_list = [blob['centroid_x'] for blob in blobs_list]
        v_list = [blob['centroid_y'] for blob in blobs_list]

        # Determine x and y span
        u_min, u_max = min(u_list), max(u_list)
        v_min, v_max = min(v_list), max(v_list)
        u_span = u_max - u_min
        v_span = v_max - v_min

        # Use max span to sort points
        if u_span >= v_span:
            uv_list = zip(u_list, v_list)
            uv_list.sort()
            u_list = [u for u, v in uv_list]
            v_list = [v for u, v in uv_list]
        else:
            vu_list = zip(v_list, u_list)
            vu_list.sort()
            u_list = [u for v, u in vu_list]
            v_list = [v for v, u in vu_list]

        # Look at distances and sort so that either the large gap occurs first
        # or last based on the spacing data.
        uv_list = zip(u_list, v_list)
        dist_0_to_1 = distance_2d(uv_list[0], uv_list[1])
        dist_1_to_2 = distance_2d(uv_list[1], uv_list[2])
        if self.large_step_first:
            if dist_0_to_1 <= dist_1_to_2:
                uv_list.reverse()
        else:
            if dist_0_to_1 >= dist_1_to_2:
                uv_list.reverse()
        return uv_list
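        # Worked example (hypothetical points): blobs at (10, 50), (40, 52)
        # and (55, 51) span more in u than in v, so they sort by u.  The 0->1
        # gap (~30) exceeds the 1->2 gap (~15), so with large_step_first True
        # the order stands; otherwise the list is reversed.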

    def associate_data_w_seq(self):
        """
        Associate tracking data with acquisition sequence number.

        Note, this is only used when the subscription topic type is
        sensor_msgs/Image.
        """
        with self.lock:
            for stamp, data in self.stamp_to_data.items():
                try:
                    seq = self.stamp_to_seq[stamp]
                    seq_found = True
                except KeyError:
                    seq_found = False

                if seq_found:
                    self.seq_to_data[seq] = data
                    del self.stamp_to_data[stamp]
                    del self.stamp_to_seq[stamp]
                else:
                    # Throw away data greater than the maximum allowed age
                    if self.latest_stamp is not None:
                        latest_stamp_secs = stamp_tuple_to_secs(
                            self.latest_stamp)
                        stamp_secs = stamp_tuple_to_secs(stamp)
                        stamp_age = latest_stamp_secs - stamp_secs
                        if stamp_age > self.max_stamp_age:
                            del self.stamp_to_data[stamp]

    def publish_tracking_pts(self):
        """
        Creates the tracking_pts message and publishes it.
        """
        for seq, data in sorted(self.seq_to_data.items()):

            # Create list of tracking points
            tracking_pts = []
            for u, v in data['tracking_pts']:
                tracking_pts.append(Point2d(u, v))

            # Create the tracking points message and publish
            tracking_pts_msg = ThreePointTrackerRaw()
            tracking_pts_msg.data.seq = seq
            tracking_pts_msg.data.secs = data['stamp'][0]
            tracking_pts_msg.data.nsecs = data['stamp'][1]
            tracking_pts_msg.data.camera = self.camera
            tracking_pts_msg.data.found = data['found']
            tracking_pts_msg.data.distance = data['dist_to_image_center']
            tracking_pts_msg.data.roi = data['tracking_pts_roi']
            tracking_pts_msg.data.roi_src = data['tracking_pts_roi_src']
            tracking_pts_msg.data.roi_dst = data['tracking_pts_roi_dst']
            tracking_pts_msg.data.points = tracking_pts
            tracking_pts_msg.image = data['image_tracking_pts']
            self.tracking_pts_pub.publish(tracking_pts_msg)
            #print('publish', seq)

            # Remove data for this sequence number.
            del self.seq_to_data[seq]

    def run(self):
        """
        Main loop - associates tracking data and time stamps  w/ image sequence
        numbers and publishes the tracking data.
        """
        while not rospy.is_shutdown():
            if self.topic_type == 'sensor_msgs/Image':
                self.associate_data_w_seq()
            self.publish_tracking_pts()
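
The listing above assumes several module-level helpers that are not shown
(get_midpoint_uv_list, distance_2d, stamp_tuple_to_secs). A minimal sketch of
what they might look like, for reference only:

import math

def get_midpoint_uv_list(uv_list):
    # Mean of the u and v coordinates of a list of (u, v) points.
    u_mid = sum(u for u, v in uv_list) / float(len(uv_list))
    v_mid = sum(v for u, v in uv_list) / float(len(uv_list))
    return u_mid, v_mid

def distance_2d(p, q):
    # Euclidean distance between two 2d points.
    return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)

def stamp_tuple_to_secs(stamp):
    # Converts a (secs, nsecs) stamp tuple to seconds as a float.
    return stamp[0] + 1.0e-9 * stamp[1]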