Example #1
def check_regions_and_pairs():
    """
    Make sure the tracking regions and camera pairs are well defined.
    """
    regions_dict = file_tools.read_tracking_2d_regions()
    camera_pairs_dict = file_tools.read_tracking_2d_camera_pairs()
    region_tools.check_regions_and_camera_pairs(regions_dict, camera_pairs_dict)
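Neither the contents of regions_dict nor the check performed by region_tools.check_regions_and_camera_pairs are shown in this example. As a rough illustration only, with hypothetical region and camera names, the data shapes and the kind of consistency check involved could look like the sketch below; the real implementation may do more.

# Illustration only -- hypothetical data and a simplified consistency check;
# the real region_tools.check_regions_and_camera_pairs may differ.
regions_dict = {'maze': ['camera_1', 'camera_2', 'camera_3']}
camera_pairs_dict = {'maze': [('camera_1', 'camera_2'), ('camera_2', 'camera_3')]}

def check_regions_and_camera_pairs(regions_dict, camera_pairs_dict):
    for region, pair_list in camera_pairs_dict.items():
        if region not in regions_dict:
            raise ValueError('pair region {0} has no region definition'.format(region))
        camera_set = set(regions_dict[region])
        for camera_0, camera_1 in pair_list:
            if camera_0 not in camera_set or camera_1 not in camera_set:
                raise ValueError(
                    'pair ({0}, {1}) uses a camera outside region {2}'.format(
                        camera_0, camera_1, region))

check_regions_and_camera_pairs(regions_dict, camera_pairs_dict)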
Example #2
def stitched_image_labelers_ready():
    """
    Checks to see if the stitched image labelers are ready. They are considered
    ready if an image_stitched_labeled topic is being published for every
    tracking region.
    """
    regions_dict = file_tools.read_tracking_2d_regions()
    number_of_regions = len(regions_dict)
    number_of_labeled = len(find_topics_w_ending('image_stitched_labeled'))
    return number_of_regions == number_of_labeled
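find_topics_w_ending is a helper defined elsewhere in this module. A minimal sketch of such a helper, assuming the standard rospy master query rospy.get_published_topics() (the actual implementation in this package may differ), is:

import rospy

# Sketch only: return all currently published topics whose names end with 'ending'.
def find_topics_w_ending(ending):
    matching = []
    for topic, topic_type in rospy.get_published_topics():
        if topic.endswith(ending):
            matching.append(topic)
    return matching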
Example #3
def stitched_images_ready():
    """
    Check to see if the stitched image topics are ready. There should be one
    per region.
    """
    regions_dict = file_tools.read_tracking_2d_regions() 
    num_regions = len(regions_dict)
    num_stitched = get_number_of_stitched_image_topics()
    num_seq_and_stitched = get_number_of_seq_and_image_stitched_topics()
    return num_regions == num_stitched == num_seq_and_stitched
Example #4
def three_point_tracker_synchronizers_ready():
    """
    Checks to see if the three point tracker synchronizers are ready. They are 
    considered to be ready if the tracking_pts and image_tracking_pts topics are
    being published for every tracking region.
    """
    regions_dict = file_tools.read_tracking_2d_regions()
    num_regions = len(regions_dict)
    num_tracking_pts = sum(len(find_region_tracking_pts_topics(r)) for r in regions_dict)
    num_image_tracking_pts = sum(len(find_region_image_tracking_pts_topics(r)) for r in regions_dict)
    return num_regions == num_tracking_pts == num_image_tracking_pts
Example #5
    def __init__(self):
        self.regions_dict = file_tools.read_tracking_2d_regions()
        self.camera_pairs_dict = file_tools.read_tracking_2d_camera_pairs()

        # Create dictionaries describing transformation setup
        self._create_camera_to_region_dict()
        self._create_tracking_plane_to_region_dict()
        self._create_region_to_anchor_plane_dict()
        self._create_tracking_to_anchor_plane_dict()
        self._create_camera_to_image_size_dict()
        self._get_stitching_params()

        # Create transformation dictionaries
        self._create_tracking_plane_to_transform_dict()
        self._create_camera_to_transform_dict()
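The _create_* methods called above are not included in this snippet. As one hedged example of what such a method might do (a sketch only, not the package's actual code), _create_camera_to_region_dict could invert regions_dict so that each camera maps to the regions that contain it:

    def _create_camera_to_region_dict(self):
        # Sketch only: map each camera to the list of regions whose
        # camera list includes it.
        self.camera_to_region = {}
        for region, camera_list in self.regions_dict.items():
            for camera in camera_list:
                self.camera_to_region.setdefault(camera, []).append(region)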
Example #6
def three_point_trackers_ready():
    """
    Checks to see if the three point trackers are ready. They are considered ready
    if the tracking_pts and image_tracking_pts topics are being published for
    every camera in a tracking region.
    """
    regions_dict = file_tools.read_tracking_2d_regions()
    camera_set = set()
    for region, camera_list in regions_dict.iteritems():
        camera_set.update(camera_list)
    num_cameras = len(camera_set)
    num_tracking_pts = len(find_camera_tracking_pts_topics())
    num_image_tracking_pts = len(find_camera_image_tracking_pts_topics())
    return num_cameras == num_tracking_pts == num_image_tracking_pts
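The set is what keeps the camera count correct when a camera belongs to more than one tracking region. With hypothetical data (illustration only):

# Illustration with made-up region and camera names: camera_3 is listed in two
# regions but is counted once because the names are collected into a set.
regions_dict = {
    'maze':  ['camera_1', 'camera_2', 'camera_3'],
    'arena': ['camera_3', 'camera_4'],
}
camera_set = set()
for region, camera_list in regions_dict.items():
    camera_set.update(camera_list)
print(len(camera_set))  # 4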
Example #7
def setup_redis_db():
    # Create connection to the redis database
    db = redis.Redis('localhost', db=config.redis_db)

    # Add regions dictionary
    regions_dict = file_tools.read_tracking_2d_regions()
    redis_tools.set_dict(db, 'regions_dict', regions_dict)

    # Add extra video dictionary
    extra_video_dict = file_tools.read_logging_extra_video()
    redis_tools.set_dict(db, 'extra_video_dict', extra_video_dict)

    # Add mjpeg info dictionary
    mjpeg_info_dict = mjpeg_servers.get_mjpeg_info_dict()
    redis_tools.set_dict(db, 'mjpeg_info_dict', mjpeg_info_dict)

    # Add external network interface
    machine_def = mct_introspection.get_machine_def()
    ip_iface_ext = iface_tools.get_ip_addr(
        machine_def['mct_master']['iface-ext'])
    redis_tools.set_str(db, 'ip_iface_ext', ip_iface_ext)

    # Set operating mode
    redis_tools.set_str(db, 'current_mode', 'preview')

    # Get frame rates
    frame_rate_dict = file_tools.read_frame_rates()
    redis_tools.set_dict(db, 'frame_rate_dict', frame_rate_dict)

    frame_rate = frame_rate_dict['tracking_2d']

    # Get logging directory - add base path and create the directory if it doesn't exist
    logging_params_dict = file_tools.read_logging_params()
    logging_params_dict['directory'] = os.path.join(
        os.environ['HOME'], logging_params_dict['directory'])
    if not os.path.exists(logging_params_dict['directory']):
        os.mkdir(logging_params_dict['directory'])
    redis_tools.set_dict(db, 'logging_params_dict', logging_params_dict)

    # Get lighting params
    mightex_params_dict = file_tools.read_mightex_params()
    lighting_params_dict = mightex_to_lighting_params(mightex_params_dict)

    redis_tools.set_dict(db, 'lighting_params_dict', lighting_params_dict)

    return db
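The redis_tools helpers used above (set_dict, set_str) wrap writes to the Redis database; their implementation is not shown here. A plausible minimal sketch, assuming JSON serialization for dictionaries (the real redis_tools may encode values differently), is:

import json
import redis

# Sketch only: store a dict as a JSON string under one key, and read it back.
def set_dict(db, key, value):
    db.set(key, json.dumps(value))

def get_dict(db, key):
    return json.loads(db.get(key))

db = redis.Redis('localhost', db=0)
set_dict(db, 'regions_dict', {'maze': ['camera_1', 'camera_2']})
print(get_dict(db, 'regions_dict'))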
Example #8
    def __init__(self, region, max_seq_age=200):
        self.lock = threading.Lock()

        self.region = region
        regions_dict = file_tools.read_tracking_2d_regions()
        self.camera_list = regions_dict[region]
        self.camera_list.sort()
        self.create_camera_to_tracking_dict()
        self.latest_seq = None
        self.max_seq_age = max_seq_age
        self.tracking_pts_pool = {}

        self.tracking_pts_roi_size = rospy.get_param(
            '/three_point_tracker_params/roi_size', (150, 150))

        # Color and font for tracking points image
        self.magenta = (255, 255, 0)
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.6,
                                        0.6,
                                        thickness=0)

        # Font for PIL tracking info image
        self.info_image_size = (180, 100)
        self.font = PILImageFont.truetype(
            "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)

        # Get transforms from cameras to tracking and stitched image planes
        self.tf2d = transform_2d.Transform2d()

        self.bridge = CvBridge()
        self.ready = False
        rospy.init_node('three_point_tracker_synchronizer',
                        log_level=rospy.DEBUG)

        # Subscribe to raw tracking pts topics
        self.tracking_pts_sub = {}
        for camera, topic in self.camera_to_tracking.iteritems():
            handler = functools.partial(self.tracking_pts_handler, camera)
            self.tracking_pts_sub[camera] = rospy.Subscriber(
                topic, ThreePointTrackerRaw, handler)

        # Create publishers
        self.tracking_pts_pub = rospy.Publisher('tracking_pts',
                                                ThreePointTracker)
        self.image_tracking_pts = None
        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)
        self.image_tracking_info_pub = rospy.Publisher('image_tracking_info',
                                                       Image)

        # Setup rand sync service
        rospy.wait_for_service('/get_rand_sync_signal')
        self.rand_sync_srv = rospy.ServiceProxy('/get_rand_sync_signal',
                                                GetRandSyncSignal)

        # Setup reset service - needs to be called anytime the camera trigger is
        # stopped - before it is restarted. Empties buffers of images and sequences.
        self.reset_srv = rospy.Service('reset_tracker_synchronizer', Empty,
                                       self.reset_handler)
        self.ready = True
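The reset_handler registered above is not included in this snippet. A hedged sketch of the kind of handler the comment describes, assuming Empty and EmptyResponse come from std_srvs.srv (the real method may clear more state), could be:

    def reset_handler(self, req):
        # Sketch only: empty the buffered tracking point data while holding
        # the lock, then acknowledge with an empty response.
        with self.lock:
            self.latest_seq = None
            self.tracking_pts_pool = {}
        return EmptyResponse()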
Example #9
# Needed for the print() calls in debug_print under Python 2
from __future__ import print_function

from mct_frame_drop_corrector import frame_drop_corrector_master
from mct_frame_drop_corrector import frame_drop_corrector
from mct_image_stitcher import image_stitcher_master
from mct_tracking_2d import three_point_tracker_master
from mct_tracking_2d import stitched_image_labeler_master
from mct_avi_writer import avi_writer_master
from mct_logging import tracking_pts_logger_master
from mct_rand_sync import reset_rand_sync

# Startup parameters
ready_poll_dt = 0.2  # Sleep time for polling loop when waiting for node or topic to start
camera_startup_t = 10.0  # Sleep interval for waiting for camera processes to startup
buffer_flush_t = 2.0  # Sleep interval for flushing buffers

# Get Startup configuration information
regions_dict = file_tools.read_tracking_2d_regions()
extra_video_dict = file_tools.read_logging_extra_video()
camera_pairs_dict = file_tools.read_tracking_2d_camera_pairs()
region_tools.check_regions_and_camera_pairs(regions_dict, camera_pairs_dict)


def debug_print(*args, **kwarg):
    debug = kwarg.pop('debug')
    if debug:
        print(*args, **kwarg)


def tracking_2d_node_startup(debug=False):

    # Stop camera triggers
    debug_print(' * stopping camera triggers ... ', end='', debug=debug)
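ready_poll_dt above suggests a polling loop that waits for nodes and topics to come up. A sketch of that pattern, using a boolean readiness check such as stitched_images_ready() from the earlier examples (the actual startup script may structure this differently):

import time

# Sketch only: poll a readiness function until it returns True or a timeout expires.
def wait_until_ready(ready_func, poll_dt=ready_poll_dt, timeout=60.0):
    t_start = time.time()
    while not ready_func():
        if time.time() - t_start > timeout:
            raise RuntimeError('timed out waiting for {0}'.format(ready_func.__name__))
        time.sleep(poll_dt)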
Example #10
    def __init__(
            self, 
            region, 
            topic_end='image_rect_skip', 
            topic_type='sensor_msgs/Image', 
            max_seq_age=150, 
            max_stamp_age=1.5
            ):

        self.topic_end = topic_end
        self.topic_type = topic_type
        self.max_seq_age = max_seq_age
        self.max_stamp_age = max_stamp_age

        self.warp_flags = cv.CV_INTER_LINEAR 
        self.stitching_params = file_tools.read_tracking_2d_stitching_params()

        rospy.init_node('image_stitcher')
        self.ready = False
        self.is_first_write = True
        self.stitched_image = None
        self.seq_to_images = {}
        self.stamp_to_seq_pool = {}
        self.image_waiting_pool = {}

        # Information on the latest sequence received and stitched
        self.seq_newest = None
        self.stamp_newest = None

        self.lock = threading.Lock()
        self.bridge = CvBridge()

        # Get image and camera information 
        regions_dict = file_tools.read_tracking_2d_regions()
        self.region = region
        self.camera_list = regions_dict[region]
        self.create_camera_to_image_dict()
        self.create_camera_to_info_dict()

        # Get transforms from cameras to stitched image and stitched image size
        self.tf2d = transform_2d.Transform2d()
        self.create_tf_data()
        self.get_stitched_image_size()

        # Subscribe to topics based on incoming topic type.
        if self.topic_type == 'sensor_msgs/Image':
            
            # Create pool dictionaries for incoming data.
            for camera in self.camera_list:
                self.stamp_to_seq_pool[camera] = {}
                self.image_waiting_pool[camera] = {}

            # Subscribe to camera info topics
            self.info_sub = {}
            for camera, topic in self.camera_to_info.iteritems():
                info_handler = functools.partial(self.info_handler, camera)
                self.info_sub[camera] = rospy.Subscriber(topic, CameraInfo, info_handler)

            # Subscribe to rectified image topics
            self.image_sub = {}
            for camera, topic in self.camera_to_image.iteritems():
                image_handler = functools.partial(self.image_handler, camera)
                self.image_sub[camera] = rospy.Subscriber(topic, Image, image_handler)

        elif self.topic_type == 'mct_msg_and_srv/SeqAndImage':

            # Subscribe to SeqAndImage topics
            self.seq_and_image_sub = {}
            for camera, topic in self.camera_to_image.iteritems():
                seq_and_image_handler = functools.partial(self.seq_and_image_handler, camera)
                self.seq_and_image_sub[camera] = rospy.Subscriber(topic, SeqAndImage, seq_and_image_handler)

        else:
            err_msg = 'unable to handle topic type: {0}'.format(self.topic_type)
            raise ValueError(err_msg)

        # Stitched image publisher and seq publisher
        self.image_pub = rospy.Publisher('image_stitched', Image)
        self.image_and_seq_pub = rospy.Publisher('seq_and_image_stitched', SeqAndImage)
        self.seq_pub = rospy.Publisher('image_stitched/seq', UInt32)
        self.stamp_pub = rospy.Publisher('image_stitched/stamp_info', StampInfo)
        self.processing_dt_pub = rospy.Publisher('image_stitched/processing_dt', ProcessingInfo)

        # Setup reset service - needs to be called anytime the camera trigger is  
        # stopped - before it is restarted. Empties buffers of images and sequences. 
        self.reset_srv = rospy.Service('reset_image_stitcher', Empty, self.handle_reset_srv)
        self.ready = True