def __init__(self):
        """Initialize the face-recognizer ROS node.

        Reads configuration from the ROS parameter server, optionally
        constructs a web image scraper, wires the camera/tracker/face
        topics through an approximate time synchronizer, and exposes an
        ``add_to_query`` service.  If no trained classifier is available
        and the node is configured as query-only, the node shuts down.
        """
        rospy.init_node('face_recognizer', anonymous=True)
        # Optional web-image scraper; imported lazily so nodes that have
        # it disabled never pay the import cost.
        # NOTE(review): the imported name ``image_scaper`` looks like a
        # misspelling of ``image_scraper`` — confirm against the
        # image_scraper module before renaming.
        self.image_scraper_enable = rospy.get_param('image_scraper')
        if self.image_scraper_enable != 0:
            from image_scraper import image_scaper
            self.user_agents = rospy.get_param('user_agents')
            self.image_scraper = image_scaper(self.user_agents)

        # Node configuration pulled from the ROS parameter server.
        self.face_event_topic = rospy.get_param('publish_topic_event')
        self.openface_loc = rospy.get_param('openface')
        self.camera_topic = rospy.get_param('recognition_topic')
        self.filtered_face_locations = rospy.get_param('filtered_face_locations')
        self.shape_predictor_file = rospy.get_param("shape_predictor")
        self.image_dir = rospy.get_param("image_locations")
        self.face_recognizer = face_recognizer(self.openface_loc, self.image_dir)
        self.bridge = CvBridge()
        self.event_pub = rospy.Publisher( self.face_event_topic, FaceEvent, queue_size=10 )
        # Subscribers whose messages are paired by timestamp below.
        self.image_sub = message_filters.Subscriber(self.camera_topic, Image)
        self.cmt_sub = message_filters.Subscriber('tracker_results',Trackers)
        self.face_sub = message_filters.Subscriber(self.filtered_face_locations, Objects)
        self.temp_sub = message_filters.Subscriber('temporary_trackers',Trackers)
        # Queue size 1, 0.25 s slop: only near-simultaneous message sets
        # from all four topics reach self.callback.
        ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.cmt_sub,self.face_sub,self.temp_sub], 1,0.25)
        ts.registerCallback(self.callback)
        # dynamic_reconfigure server for runtime parameter changes.
        self.srv = Server(RecognitionConfig, self.sample_callback)
        self.faces_cmt_overlap = {}
        self.logger = logging.getLogger('hr.cmt_tracker.face_reinforcer_node')
        #This would hold the trackers that the system would hold.
        self.save_tracker_images = []
        #This query x results of this particular tracking instances.
        self.get_tracker_results = []

        self.service_ = rospy.Service('add_to_query', TrackerNames, self.add_to_query)
        self.cmt_state = {}
        # self.confidence = 0.85
        self.states = ['results','save']
        # Without a trained classifier the node can only run in query
        # mode; shut down if queries are all it would be doing.
        # NOTE(review): the printed message ("There is Model so No
        # Queries would happen") reads inverted relative to this branch,
        # which runs when NO classifier was found — confirm the intended
        # wording with the author.
        if not self.face_recognizer.get_state():
            print ("No Classifier")
            query_only = rospy.get_param('query_only', True)
            if query_only:
                print("Operating in Query Mode There is Model so No Queries would happen")
                rospy.signal_shutdown("Query Mode; No Model Found. Shutting Down Node")
        else:
            print ("Classifier Found")

        # Two-bit state flags per tracker: query bit + save bit.
        self.state ={'query_save': '00', 'save_only': '01','query_only': '10', 'ignore': '11'}
        #format
        #tracker_name : {state: state in self.state or 'query_save', count: int}
        self.cmt_tracker_instances = {}

        self.overall_state = ''
        self.initial_run = True
        self.google_query = []
# --- Example #2 (scrape artifact: separator between unrelated snippets) ---
    def __init__(self):
        """Initialize the offline-viewer ROS node and its Tkinter GUI.

        Subscribes to the camera image and filtered face-location topics,
        pairs them by timestamp, and builds a small Tk window with buttons
        for viewing live results and for saving, training, and trimming
        face data.  Blocks in ``mainloop`` until the window is closed.
        """
        self.save_faces = False
        rospy.init_node('offline_viewer', anonymous=True)
        # Node configuration pulled from the ROS parameter server.
        self.openface_loc = rospy.get_param('openface')
        self.camera_topic = rospy.get_param('recognition_topic')
        self.filtered_face_locations = rospy.get_param('filtered_face_locations')
        self.shape_predictor_file = rospy.get_param("shape_predictor")
        self.image_dir = rospy.get_param("image_locations")
        self.face_recognizer = face_recognizer(self.openface_loc, self.image_dir)
        self.tracker_locations_pub = rospy.Publisher("tracking_locations", Trackers, queue_size=5)
        self.bridge = CvBridge()
        # FIX: message type was misspelled ``Imge`` (NameError at
        # construction); sibling nodes subscribe with ``Image``.
        self.image_sub = message_filters.Subscriber(self.camera_topic, Image)
        self.face_sub = message_filters.Subscriber(self.filtered_face_locations, Objects)
        # Queue size 10, 0.25 s slop between image and face timestamps.
        ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.face_sub],
                                                         10, 0.25)
        ts.registerCallback(self.callback)
        # FIX: config class was misspelled ``RecogntionConfig``; the rest
        # of the file uses ``RecognitionConfig``.
        Server(RecognitionConfig, self.sample_callback)
        #Set up GUI
        self.window = tk.Tk()  #Makes main window
        self.window.wm_title("Offline Vizualizer For OpenFace ")
        self.window.config(background="#FFFFFF")

        #Graphics window
        imageFrame = tk.Frame(self.window, width=600, height=500)
        imageFrame.grid(row=0, column=0, padx=10, pady=2)

        #Capture video frames
        self.lmain = tk.Label(imageFrame)
        self.lmain.grid(row=0, column=0)

        buttonsFrame = tk.Frame(self.window,width=600, height=20)
        buttonsFrame.grid(row = 600, column=0, padx=10, pady=2)

        liveResultsButton = tk.Button(buttonsFrame,text="Live Results", fg="brown",command= self.switchToLive)
        liveResultsButton.pack(side = tk.LEFT)

        saveFacesButton = tk.Button(buttonsFrame, text="Save Faces", fg="brown", command = self.switchToSave)
        saveFacesButton.pack(side = tk.LEFT)

        trainFacesButton = tk.Button(buttonsFrame,text="Train Faces", fg="brown", command = self.switchToTrain)
        trainFacesButton.pack(side = tk.LEFT)

        trimFacesButton = tk.Button(buttonsFrame, text="Trim Faces", fg="brown", command = self.trimTrainingSet)
        trimFacesButton.pack(side=tk.LEFT)

        #add buttons
        self.new_faces_array = []
        self.window.mainloop()  # Starts GUI
# --- Example #3 (scrape artifact: separator between unrelated snippets) ---
    def __init__(self):
        """Initialize the face-recognizer ROS node.

        Reads configuration from the ROS parameter server, wires the
        camera/tracker/face topics through an approximate time
        synchronizer, and exposes an ``add_to_query`` service for adding
        trackers to the recognition query queue.
        """
        rospy.init_node('face_recognizer', anonymous=True)
        # Node configuration pulled from the ROS parameter server.
        self.openface_loc = rospy.get_param('openface')
        self.camera_topic = rospy.get_param('camera_topic')
        self.filtered_face_locations = rospy.get_param('filtered_face_locations')
        self.shape_predictor_file = rospy.get_param("shape_predictor")
        self.image_dir = rospy.get_param("image_locations")
        self.face_recognizer = face_recognizer(self.openface_loc, self.image_dir)
        self.bridge = CvBridge()
        # Subscribers whose messages are paired by timestamp below.
        self.image_sub = message_filters.Subscriber(self.camera_topic, Image)
        self.cmt_sub = message_filters.Subscriber('tracker_results',Trackers)
        self.face_sub = message_filters.Subscriber(self.filtered_face_locations, Objects)
        self.temp_sub = message_filters.Subscriber('temporary_trackers',Trackers)
        # Queue size 1, 0.25 s slop: only near-simultaneous message sets
        # from all four topics reach self.callback.
        ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.cmt_sub,self.face_sub,self.temp_sub], 1,0.25)
        ts.registerCallback(self.callback)
        # FIX: config class was misspelled ``RecogntionConfig``; the rest
        # of the file uses ``RecognitionConfig``.
        self.srv = Server(RecognitionConfig, self.sample_callback)
        self.faces_cmt_overlap = {}
        self.logger = logging.getLogger('hr.cmt_tracker.face_reinforcer_node')
        #This would hold the trackers that the system would hold.
        self.save_tracker_images = []
        #This query x results of this particular tracking instances.
        self.get_tracker_results = []

        self.service_ = rospy.Service('add_to_query', TrackerNames, self.add_to_query)
        self.cmt_state = {}
        # Minimum recognition confidence for accepting a match.
        self.confidence = 0.85
        self.states = ['results','save']


        # Two-bit state flags per tracker: query bit + save bit.
        self.state ={'query_save': '00', 'save_only': '01','query_only': '10', 'ignore': '11'}
        #format
        #tracker_name : {state: state in self.state or 'query_save', count: int}
        self.cmt_tracker_instances = {}

        self.overall_state = ''
        self.initial_run = True
# --- Example #4 (scrape artifact: separator between unrelated snippets) ---
    def __init__(self):
        """Initialize the offline-viewer ROS node and its Tkinter GUI.

        Subscribes to the camera image and filtered face-location topics,
        pairs them by timestamp, and builds a small Tk window with buttons
        for viewing live results and for saving, training, and trimming
        face data.  Blocks in ``mainloop`` until the window is closed.
        """
        self.save_faces = False
        rospy.init_node('offline_viewer', anonymous=True)
        # Node configuration pulled from the ROS parameter server.
        self.openface_loc = rospy.get_param('openface')
        self.camera_topic = rospy.get_param('recognition_topic')
        self.filtered_face_locations = rospy.get_param(
            'filtered_face_locations')
        self.shape_predictor_file = rospy.get_param("shape_predictor")
        self.image_dir = rospy.get_param("image_locations")
        self.face_recognizer = face_recognizer(self.openface_loc,
                                               self.image_dir)
        self.tracker_locations_pub = rospy.Publisher("tracking_locations",
                                                     Trackers,
                                                     queue_size=5)
        self.bridge = CvBridge()
        # FIX: message type was misspelled ``Imge`` (NameError at
        # construction); sibling nodes subscribe with ``Image``.
        self.image_sub = message_filters.Subscriber(self.camera_topic, Image)
        self.face_sub = message_filters.Subscriber(
            self.filtered_face_locations, Objects)
        # Queue size 10, 0.25 s slop between image and face timestamps.
        ts = message_filters.ApproximateTimeSynchronizer(
            [self.image_sub, self.face_sub], 10, 0.25)
        ts.registerCallback(self.callback)
        # dynamic_reconfigure server for runtime parameter changes.
        Server(RecognitionConfig, self.sample_callback)
        #Set up GUI
        self.window = tk.Tk()  #Makes main window
        self.window.wm_title("Offline Vizualizer For OpenFace ")
        self.window.config(background="#FFFFFF")

        #Graphics window
        imageFrame = tk.Frame(self.window, width=600, height=500)
        imageFrame.grid(row=0, column=0, padx=10, pady=2)

        #Capture video frames
        self.lmain = tk.Label(imageFrame)
        self.lmain.grid(row=0, column=0)

        buttonsFrame = tk.Frame(self.window, width=600, height=20)
        buttonsFrame.grid(row=600, column=0, padx=10, pady=2)

        liveResultsButton = tk.Button(buttonsFrame,
                                      text="Live Results",
                                      fg="brown",
                                      command=self.switchToLive)
        liveResultsButton.pack(side=tk.LEFT)

        saveFacesButton = tk.Button(buttonsFrame,
                                    text="Save Faces",
                                    fg="brown",
                                    command=self.switchToSave)
        saveFacesButton.pack(side=tk.LEFT)

        trainFacesButton = tk.Button(buttonsFrame,
                                     text="Train Faces",
                                     fg="brown",
                                     command=self.switchToTrain)
        trainFacesButton.pack(side=tk.LEFT)

        trimFacesButton = tk.Button(buttonsFrame,
                                    text="Trim Faces",
                                    fg="brown",
                                    command=self.trimTrainingSet)
        trimFacesButton.pack(side=tk.LEFT)

        #add buttons
        self.new_faces_array = []
        self.window.mainloop()  # Starts GUI