Example #1
def main(face_recognition_method: str = None):
    webcam = cv.VideoCapture(0)

    data_aquisition = DataAquisition(dataset_dir='images/training/',
                                     resources_dir='resources/')

    known_people = data_aquisition.create_dataset(webcam)

    if len(known_people) == 1:
        print('At least one known face is required for recognition')
        return

    print('Training model...')
    data_aquisition.train(face_recognition_method)

    print('Initializing face recognizer...')
    face_recognizer = FaceRecognizer(resources_dir='resources/',
                                     recognizer_name=face_recognition_method)

    while True:
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

        _, c_frame = webcam.read()

        c_frame, face_locations, labels, factors = face_recognizer.predict(
            c_frame, known_people)

        cv.imshow('webcam', c_frame)

    webcam.release()
    cv.destroyAllWindows()
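A hedged usage sketch for the entry point above; the recognizer name is an assumption for illustration, not something this example confirms:

# Hypothetical launcher; 'lbph' is an assumed recognizer name, not confirmed by the source.
if __name__ == '__main__':
    main(face_recognition_method='lbph')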
Example #2
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.bridge = CvBridge()
        self.visualization = True

        self.image_msg = None
        self.pub_detections = rospy.Publisher("~image_face",
                                              CompressedImage,
                                              queue_size=1)
        self.recognizer = FaceRecognizer(scale=3)

        rospy.Service('~detect_face_locations', GetFaceDetections,
                      self.cbDetectFaceLocations)
        rospy.Service('~detect_face_labels', GetFaceDetections,
                      self.cbDetectFaceLabels)
        rospy.Service('~list_face_labels', GetStrings, self.cbListFaceLabels)
        rospy.Service('~add_face_label', SetString, self.cbAddFaceLabel)
        rospy.Service('~remove_face_label', SetString, self.cbRemoveFaceLabel)
        # self.sub_image = rospy.Subscriber("~image_raw", Image, self.cbImg , queue_size=1)
        self.sub_image = rospy.Subscriber("~image_raw/compressed",
                                          CompressedImage,
                                          self.cbImg,
                                          queue_size=1)

        # rospy.loginfo("[%s] wait_for_service : camera_get_frame..." % (self.node_name))
        # rospy.wait_for_service('~camera_get_frame')
        # self.get_frame = rospy.ServiceProxy('~camera_get_frame', GetFrame)
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #3
 def memorize_face(self):
     """ Learn face model.
     Returns:
     """
     # FaceRecognizer.fit(n_epoch=5)
     FaceRecognizer.fit(
         n_epoch=self.N_EPOCH, batch_size=self.BATCH_SIZE,
         image_size=self.RECOGNITION_IMAGE_SIZE,)
Example #4
 def initialize(self):
     if self.network_name == 'YOLO':
         self.network = YOLO_TF()
     elif self.network_name == 'SSD-mobilenet-face':
         self.network = ssd_mobilenet_face_wrapper.ssd_mobilenet_face(
             self.threshold)
         self.faceRecognizer = FaceRecognizer()
     else:
         raise Exception('Network name not recognized')
Example #5
 def run_animation(self):
     """
     Run the animation of the face
     :return:
     """
     self.runText.set("Press 'Esc' to stop")
     self.rc = FaceRecognizer(self.root, self.canvas, self.runText,
                              self.runBtn, self.chatVar)
     self.rc.start()
     self.runText.set("Start Animation")
Example #6
def history_recollection():
    history = FaceRecognizer(user_interface)
    history.build_imagecsv()
    user_number = history.RecognizeFace()
    user_name = history.names[user_number]

    if user_name is None:
        #print "did not recognize user ", user_name
        chatbot_response = "I don't think we've met before, what's your name?"
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", "Unknown")), "Primary Topics: ")
        user_interface.render()
        text_to_speech(chatbot_response)
        user_name = speech.recognize_speech()
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", user_name)), "Primary Topics: ")
        user_interface.render()
        #print "Name entered as: ", user_name
        history.retrain(user_name)

    else:
        #print "recognized user ", user_name
        chatbot_response = "It's good to see you again, " + user_name
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", user_name)), "Primary Topics: ")
        user_interface.render()
        text_to_speech(chatbot_response)

    history.exit()
    return user_name
Example #7
    def __init__(self, url, faces_path, tolerance):
        self.url = url

        # load stream video
        self.camera = cv2.VideoCapture(self.url)
        if not self.camera.isOpened():
            print("Error: Failed to open the video source.")

        self.video_frame = self.camera.get(cv2.CAP_PROP_FRAME_COUNT)
        self.video_fps = self.camera.get(cv2.CAP_PROP_FPS)
        self.image_x = int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.image_y = int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        self.face_recognizer = FaceRecognizer(faces_path, tolerance)
Example #8
 def make_one_to_more_test(self, max_size):
     self._load_dataset_more(max_size)
     for i, (picture, name) in enumerate(self.dataset_more_base):
         # print(picture)
         try:
             self.face_recognizer.fingerprints[
                 name + "?" + str(i)] = self._get_face_feature(picture)
         except Exception:
             pass
     # print("ok")
     correct_num = 0
     cnt = 0
     for picture, name in self.dataset_more_test:
         # print(f"{cnt} / {len(self.dataset_more_test)}")
         cnt += 1
         feature = self._get_face_feature(picture)
         similarity_max = 0
         name_of_max = ""
         for p, vector in self.face_recognizer.fingerprints.items():
             similarity = FaceRecognizer._cosine_similarity(feature, vector)
             if similarity > similarity_max:
                 name_of_max = p
                 similarity_max = similarity
         # print(f"{similarity_max}, {nam    e_of_max}, {p}")
         if name_of_max.startswith(name):
             correct_num += 1
     return correct_num / len(self.dataset_more_test)
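The benchmark above leans on FaceRecognizer._cosine_similarity. A minimal sketch of such a helper, assuming the stored fingerprints are NumPy vectors (the real implementation is not shown in this example):

import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Cosine of the angle between two embedding vectors (1.0 = same direction).
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / denom) if denom else 0.0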
Example #9
def test_gender_recognizer(args):
    print('Gender Recognizer:')
    input_shape = (args.image_size, args.image_size, args.num_channels)
    print('Input shape:', input_shape)
    X, y = get_30_people_chunk(args.image_path, 0, gender_label=True)

    print('images shape:', X.shape)
    print('labels shape:', y.shape)

    identity_recognizer = FaceRecognizer(args.model_path + '_gender',
                                         y.shape[1], input_shape,
                                         args.num_channels)

    identity_recognizer.model.compile(loss=TEST_SOFTMAX,
                                      optimizer=TEST_SGD,
                                      metrics=['accuracy'])

    y_pred = identity_recognizer.model.predict(X)
    y_pred_ct = np.argmax(y_pred, axis=1)
    y_true_ct = np.argmax(y, axis=1)
    acc = np.sum(y_pred_ct == y_true_ct) / y_true_ct.shape[0]

    print('\tAccuracy on training data: %.4f' % acc)

    X_val, y_val = get_30_people_chunk(args.image_path, 1, gender_label=True)
    y_pred = identity_recognizer.model.predict(X_val)
    y_pred_ct = np.argmax(y_pred, axis=1)
    y_true_ct = np.argmax(y_val, axis=1)
    acc = np.sum(y_pred_ct == y_true_ct) / y_true_ct.shape[0]

    print('\tAccuracy on validation data: %.4f' % acc)
Example #10
 def initialize(self):
     if self.network_name == 'YOLO':
         self.network = YOLO_TF()
     elif self.network_name == 'SSD-brainlab':
         self.brainlab_args = UpdatePredictConfiguration()
         assert self.brainlab_args.batch_size == 1, 'Batch size must be 1'
         self.network = BrainLabNet.BrainLabNet(self.brainlab_args,
                                                'interactive')
         self.network.start_interactive_session(self.brainlab_args)
         self.faceRecognizer = FaceRecognizer()
     elif self.network_name == 'SSD-mobilenet-face':
         self.network = ssd_mobilenet_face_wrapper.ssd_mobilenet_face(
             self.threshold)
         self.faceRecognizer = FaceRecognizer()
     else:
         raise Exception('Network name not recognized')
Example #11
class IPCamera:
    """
    Face recognition for IPCamera
    """

    def __init__(self, url, faces_path, tolerance):
        self.url = url

        # load stream video
        self.camera = cv2.VideoCapture(self.url)
        if not self.camera.isOpened():
            print("Error: Failed to open the video source.")

        self.video_frame = self.camera.get(cv2.CAP_PROP_FRAME_COUNT)
        self.video_fps = self.camera.get(cv2.CAP_PROP_FPS)
        self.image_x = int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.image_y = int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        self.face_recognizer = FaceRecognizer(faces_path, tolerance)

    def __del__(self):
        self.camera.release()

    def get_frame(self):
        """ Returns estimated image """
        ret, frame = self.camera.read()
        if not ret:
            return None

        res_img = self.face_recognizer.recognize(frame)

        _, jpeg = cv2.imencode('.jpg', res_img)
        return jpeg.tobytes()
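Because get_frame() returns encoded JPEG bytes, this class drops straight into an MJPEG HTTP stream. A minimal sketch, assuming Flask; the URL and paths are hypothetical placeholders:

from flask import Flask, Response

app = Flask(__name__)
camera = IPCamera('rtsp://example/stream', 'faces/', 0.6)  # hypothetical URL and paths

def mjpeg_frames():
    # Yield each JPEG frame in multipart format until the stream ends.
    while True:
        jpeg = camera.get_frame()
        if jpeg is None:
            break
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n')

@app.route('/video')
def video():
    return Response(mjpeg_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')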
Example #12
    def __init__(self, args: Dict):
        # todo proper handling of self.modes
        self.ie = IECore()
        self.modes = self.__determine_processing_mode(args)
        net_face_detect = net_landmarks_detect = net_recognize_face = None

        if not self.modes['detect']:
            raise ValueError('detection model undefined')
        # load networks from file
        net_face_detect = self.__prepare_network(args['detection_model'])
        # put it to corresponding class
        # self.face_locator = FaceLocator(net_face_detect, args['detection_model_threshold'])
        self.face_locator = FaceLocator(net_face_detect,
                                        args['detection_model_threshold'],
                                        NetworkType(args['detection_model']))
        # setup device plugins
        if next(iter(args['device'])) == 'CPU':
            # CPU
            self.ie.set_config(config={
                "CPU_THROUGHPUT_STREAMS": "1",
                "CPU_THREADS_NUM": "8",
            },
                               device_name='CPU')
        elif next(iter(args['device'])) == 'GPU':
            # GPU
            pass
            self.ie.set_config(config={"GPU_THROUGHPUT_STREAMS": "1"},
                               device_name='GPU')
        elif next(iter(args['device'])) == 'MYRIAD':
            pass
        # load to device for inferencing
        self.face_locator.deploy_network(next(iter(args['device'])), self.ie)

        if self.modes['landmark']:
            net_landmarks_detect = self.__prepare_network(
                args['landmarks_model'])
            self.landmarks_locator = LandmarksLocator(net_landmarks_detect)
            self.landmarks_locator.deploy_network(next(iter(args['device'])),
                                                  self.ie)

        if self.modes['recognize']:
            net_recognize_face = self.__prepare_network(
                args['recognition_model'])
            self.face_recognizer = FaceRecognizer(net_recognize_face)
            self.face_recognizer.deploy_network(next(iter(args['device'])),
                                                self.ie)
Example #13
class InvaderDetectorWithFace:
    def __init__(self):
        self.face_finder = FaceFinder()
        self.recognizer = FaceRecognizer()
        self.recognizer.load('trainer.yml')
        self.security_trigger = Trigger(20, lambda similarity: similarity > 80)

    def on_enabled_start(self):
        pass

    def on_disabled_update(self):
        pass

    def detect(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_finder.find(gray)

        best_face = None
        best_confidence = 1000
        for coordinates in faces:
            region_of_interest = get_region_of_interest(gray, coordinates)
            id_, conf = self.recognizer.predict(region_of_interest)

            if conf < best_confidence:
                best_face = region_of_interest
                best_confidence = conf

            print('{}, {}, {}'.format(datetime.now(), id_, conf))

            # save_region_of_interest(gray, coordinates)
            self.highlight_face(frame, coordinates)

        if best_face is not None:
            if self.security_trigger.update(best_confidence):
                print('Face does not match!')

                return True

        return False

    def highlight_face(self, image, coordinates, color=(0, 0, 255)):
        x, y, w, h = coordinates
        x_end = x + w
        y_end = y + h
        stroke = 2
        cv2.rectangle(image, (x, y), (x_end, y_end), color, stroke)
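The detector above crops each face with get_region_of_interest (imported from face_finder in Example #27, whose body is not shown). A minimal sketch of what that helper presumably does, judging from how it is called:

def get_region_of_interest(gray, coordinates):
    # Crop the (x, y, w, h) face rectangle out of the grayscale frame.
    x, y, w, h = coordinates
    return gray[y:y + h, x:x + w]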
Example #14
    def __init__(self):
        FaceRecognizer.__init__(self)
        logger.info('Creating AWS Rekognition client.')
        self._aws_client = boto3.client('rekognition')
        self._face_registries = self._get_aws_collections()
        self._active_face_registry = None

        # Holds current registry details
        self._registry_faces = []
        self._registry_face_names = []
        self._registry_face_ids = []

        self._detection_attributes = ['DEFAULT']
        self._detection_threshold = 80.0
        self._matching_threshold = 70.0
        # Enabling this will get more facial attributes such as age, gender.
        # self._detection_attributes = ['DEFAULT', 'ALL']
        logger.info('Created face recognizer.')
        logger.info('Existing face registries {}'.format(
            self._face_registries))
Example #15
	def __init__(self):
		"""Initialize the AppEngine

		Input:  None
		Output: Creates an instance of the AppEngine.
		"""

		# init QObject
		super().__init__()

		# create an instance of the database
		self.database = Database('../data/face_data.db')

		# ensure appropriate table is created
		create_sql = "CREATE TABLE IF NOT EXISTS faces (FaceName TEXT PRIMARY KEY," \
					 "FaceEncodings BLOB NOT NULL)"
		self.database.createDatabase(create_sql)

		# create an instance of the VideoStreamer
		self.videoStreamer = VideoStreamer()
		# connect the VideoStreamer's newPiFrame signal to the processNewFrame method
		self.videoStreamer.newPiFrame.connect(self.processNewFrame)

		# create an instance of the FaceRecognizer
		self.faceRecognizer = FaceRecognizer()

		# bool which determines if facial recognition is performed on each incoming frame from the Pi
		self.performFacialRecognition = False

		# current frame sent from the Pi, used for adding a new face
		self.currentFrame = None

		# current face name and encodings, used for adding a new face
		self.currentAddFaceName = None
		self.currentAddFaceEncodings = list()

		# create an instance of the AlertObserver
		self.alertObserver = AlertObserver()

		# bool determines if an alert has already been sent (only one is sent per application run)
		self.alertSent = False
Example #16
def time_fr(image_folder=None):
    embedding_file = 'embeddings.npy'
    if os.path.exists(embedding_file):
        os.remove(embedding_file)

    face_recog = FaceRecognizer(model_path='model/20180402-114759')
    if image_folder is None:
        image_path = 'faces.jpg'
        image_paths = [image_path for i in range(10)]
    else:
        image_paths = os.listdir(image_folder)
        image_paths = [os.path.join(image_folder, p) for p in image_paths if p.endswith('jpg')]

    for i, image_path in enumerate(image_paths):
        print('running: %d'%i)
        image = face_recog.read_image_sp(image_path)
        image = resize_image(image)
        det_start = time.time()
        bboxes, scores, keypoints = face_recog.detect_faces_and_keypoints(image)
        det_stop = time.time()
        print('detection time = ', det_stop - det_start)

        if len(bboxes) == 0:
            print('no face detected')
        else:
            rec_start = time.time()
            face_patches = face_recog.prepare_image_patches(image, bboxes, 160, 20)
            embeddings = face_recog.extract_embedding(face_patches)
            rec_stop = time.time()
            print('recognition time = ', rec_stop - rec_start)
Example #17
    def __init__(self, detection_method, embedding_model):
        ''' function constructor

        Constructor for FaceDetector

        Args:
            detection_method (DetectionMethod): Method to use for detection
            embedding_model (FaceEmbeddingModelEnum): The model to use for generating
                            embeddings for face images

        Returns:
            None
        '''

        # load face detection engine
        self.face_detection_engine = FaceDetectionEngine(detection_method)
        self.face_recognizer = FaceRecognizer(embedding_model)
        self.embedding_image_dimensions = get_image_dimensions_for_embedding_model(embedding_model)

        self.start_time_stamp = None
        self.fps_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)
        self.face_label_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)
Example #18
class Detector:
    def __init__(self, network_name, threshold=0.7):
        self.network_name = network_name
        self.threshold = threshold

    def initialize(self):
        if self.network_name == 'YOLO':
            self.network = YOLO_TF()
        elif self.network_name == 'SSD-mobilenet-face':
            self.network = ssd_mobilenet_face_wrapper.ssd_mobilenet_face(
                self.threshold)
            self.faceRecognizer = FaceRecognizer()
        else:
            raise Exception('Network name not recognized')

    def detect(self, image):
        if self.network_name == 'YOLO':
            yolo_result = self.network.detect_from_cvmat2(image)
            all_detections = []
            for i in range(len(yolo_result)):
                if yolo_result[i][0] == 'person':
                    this_detection = Detection()
                    this_detection.class_name = yolo_result[i][0]
                    this_detection.x_center = yolo_result[i][1]
                    this_detection.y_center = yolo_result[i][2]
                    this_detection.width = yolo_result[i][3]
                    this_detection.height = yolo_result[i][4]
                    this_detection.conf = yolo_result[i][5]
                    all_detections.append(this_detection)

        elif self.network_name == 'SSD-mobilenet-face':
            boxes = self.network.forward_image(image)
            all_detections = []
            for i in range(len(boxes)):
                this_detection = Detection()
                this_detection.class_name = boxes[i][0]
                this_detection.x_center = boxes[i][1]
                this_detection.y_center = boxes[i][2]
                this_detection.width = boxes[i][3] * 2
                this_detection.height = boxes[i][4] * 2
                this_detection.conf = boxes[i][5]
                this_detection.class_name = self.faceRecognizer.get_person_name(
                    image, this_detection)
                all_detections.append(this_detection)

        else:
            raise Exception('Network name not recognized')

        #print('Found ' + str(len(all_detections)) + ' detections')

        return all_detections
Example #19
    def run(self):
        import dlib
        from people import People
        from gender_recognizer_tf import GenderRecognizer
        from face_recognizer import FaceRecognizer
        self.gender_recognizer = GenderRecognizer()
        self.p = People("./known_people")
        self.face_recognizer = FaceRecognizer(
            self.p.people
        )  # This is not ideal; we should pass only self.p as an object.
        self.predictor = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")
        self.fa = FaceAligner(self.predictor, desiredFaceWidth=160)
        self.state = IdleState()
        while True:
            label, data = self.recv()
            if label == 'video_feed':
                frame = data[0]
                face = None
                aligned_image, face, rect_nums, XY = self.face_extractor(frame)
                if face is None:
                    continue

                face_locations, face_names, percent = self.face_recognizer.recognize(
                    face)
                in_the_frame = self.__update_state(self.p.people, percent)

                if (self.state.__class__ == MatchState):
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    self.broadcast(["Match", face, in_the_frame, gender, age])
                else:
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    if path is None:
                        continue
                    print("FaceFinder path = " + path)
                    self.broadcast(["NoMatch", path])
Example #20
    def __init__(self):
        #observer = Observable()
        #motion_detector = MotionObserver('Motion Detector')
        #observer.register(motion_detector)
        self.camera = cv2.VideoCapture(0)
        time.sleep(0.5)
        self.motion_detector = MotionDetector()
        self.face_detector = FaceDetector()
        self.face_recognizer = FaceRecognizer()
        self.detectingstate = DetectingMotion(self)
        self.scanningstate = Scanning(self)

        self.facestate = FacialRecognition(self)
        self.greetingstate = GreetRoommate(self)
        self.waitingstate = WaitingForTask(self)
        self.servingstate = Serving(self)
        self.state = self.detectingstate
Example #21
class PersonFinder:
    def __init__(self, dataset):
        self.detector = FaceDetector()
        self.recognizer = FaceRecognizer(dataset)
        self.dataset = dataset

    def find_all(self, frame):
        rects = self.detector.get_rects(frame)
        labels = self.recognizer.get_names_from_rects(frame, rects)
        return list(zip(labels, rects))

    def open_window(self):
        cv2.namedWindow(self.dataset)

    def close_window(self):
        cv2.destroyWindow(self.dataset)

    def render(self, frame):
        copy = frame.copy()
        for name, rect in self.find_all(frame):
            y = rect[1] - 10 if rect[1] - 10 > 10 else rect[1] + 10
            cv2.rectangle(copy, (rect[0], rect[1]),
                          (rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255),
                          2)
            cv2.putText(copy, name, (rect[0], y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, (0, 0, 255), 2)
        cv2.imshow(self.dataset, copy)
        return chr(cv2.waitKey(1) & 0xFF)

    @staticmethod
    def cycle(dataset=None, port=0):
        if dataset is None:
            dataset = input("Enter The Dataset Name: ")
        finder = PersonFinder(dataset)
        vid = cv2.VideoCapture(port)
        ok, frame = vid.read()
        finder.open_window()
        while vid.isOpened() and finder.render(frame) not in "qQ":
            ok, frame = vid.read()
        finder.close_window()
        vid.release()
Example #22
 def load_face_recognition_model(self):
     # person.Person.objects().distinct('_id')
     # self.face_recognizer = FaceRecognizer.objects(
     #     n_epoch=self.N_EPOCH,
     #     batch_size=self.BATCH_SIZE,
     #     image_size=self.RECOGNITION_IMAGE_SIZE,
     #     person_ids=Person.ascendind_ids(),
     # ).first()
     # if self.face_recognizer is None:
     #     return None
     person_ids = [obj._id for obj in Person.objects]
     self.face_recognizer = FaceRecognizer(
         n_epoch=self.N_EPOCH,
         batch_size=self.BATCH_SIZE,
         image_size=self.RECOGNITION_IMAGE_SIZE,
         person_ids=person_ids,
         person_num=len(person_ids)
     )
     self.face_recognizer.generate_filename()
     print(self.face_recognizer.filename)
     self.face_recognition_model = \
         self.face_recognizer.load_model()
     return self.face_recognition_model
Example #23
parser.add_argument(
    '--predictor_path',
    type=str,
    required=True,
    help=
    'path to the dlib facial landmark predictor file shape_predictor_68_face_landmarks.dat'
)
parser.add_argument('--gallery_path',
                    type=str,
                    required=True,
                    help='location of the gallery')
parser.add_argument('--port', type=int, default=8000, help='which port to use')
args = parser.parse_args()

face_aligner = FaceAligner(args.predictor_path)
face_recognizer = FaceRecognizer(args.gallery_path, OpenCVAlgorithm,
                                 face_aligner)
register_handler = RegisterHandler(args.gallery_path, face_aligner)
recognize_handler = RecognizeHandler(args.gallery_path, face_aligner,
                                     face_recognizer)


class S(BaseHTTPRequestHandler):
    def _set_response(self, message=None):
        self.send_response(200)
        if message is not None:
            #self.send_header('Content-type', 'text/html')
            self.send_header('Content-type', 'application/json')

        self.end_headers()

    def do_GET(self):
Example #24
    def process_image(self, cv_image):
        if not self.enable:
            grey = cv.CreateImage(self.image_size, 8, 1)
            cv.CvtColor(cv_image, grey, cv.CV_BGR2GRAY)
            return grey

        self.frame_count = self.frame_count + 1
        haar = False
        # Use HAAR if no faces are tracked yet, or otherwise every 5 frames
        if (self.use_haar_only or not self.detect_box.any_trackable_faces()
            ) and self.auto_face_tracking:
            self.detect_face(cv_image)
            haar = True
        elif self.frame_count % 5 == 0:
            self.detect_face(cv_image)
            haar = True
        """ Otherwise, track the face using Good Features to Track and
        Lucas-Kanade Optical Flow """
        if not self.use_haar_only:
            for fkey in self.detect_box.faces.keys():
                face = self.detect_box.faces[fkey]
                if not face.is_trackable():
                    continue

                # Let's try to identify the person by this face.
                # Note: face is of type FaceBox
                x1, y1 = face.pt1[0], face.pt1[1]
                x2, y2 = face.pt2[0], face.pt2[1]
                pad = 10  # Just to relax the face boundary

                # crop the face
                face_cv_image = cv_image[y1:y2 + pad, x1:x2 + pad]
                # DEBUG cv.SaveImage('/tmp/face.png', face_cv_image)
                bridge = CvBridge()
                r = FaceRecognizer().infer(np.asarray(face_cv_image))
                face.name = r[0][0]

                if not face.track_box or not self.is_rect_nonzero(
                        face.track_box):
                    face.features = []
                    face.update_box(face.face_box())
                track_box = self.track_lk(cv_image, face)
                if track_box and len(track_box) != 3:
                    face.update_box(track_box)
                else:
                    face.update_box_elipse(track_box)
                """ Prune features that are too far from the main cluster """
                if len(face.features) > 0:
                    # Consider to move face class
                    ((mean_x, mean_y, mean_z), mse_xy, mse_z,
                     score) = self.prune_features(
                         min_features=face.abs_min_features,
                         outlier_threshold=self.std_err_xy,
                         mse_threshold=self.max_mse,
                         face=face)
                    if score == -1:
                        face.lost_face()
                        continue
                """ Add features if the number is getting too low """
                if len(face.features) < face.min_features:
                    face.expand_roi = self.expand_roi_init * face.expand_roi
                    self.add_features(cv_image, face)
                else:
                    face.expand_roi = self.expand_roi_init
        self.detect_box.nextFrame(haar)
        self.detect_box.publish_faces()
        return cv_image
Example #25
class FaceDetector():
    ''' class FaceDetector

        Purpose: detect (and locate if present) faces in an image
    '''

    def __init__(self, detection_method, embedding_model):
        ''' function constructor

        Constructor for FaceDetector

        Args:
            detection_method (DetectionMethod): Method to use for detection
            embedding_model (FaceEmbeddingModelEnum): The model to use for generating
                            embeddings for face images

        Returns:
            None
        '''

        # load face detection engine
        self.face_detection_engine = FaceDetectionEngine(detection_method)
        self.face_recognizer = FaceRecognizer(embedding_model)
        self.embedding_image_dimensions = get_image_dimensions_for_embedding_model(embedding_model)

        self.start_time_stamp = None
        self.fps_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)
        self.face_label_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)

    # With the input frame, use the EdgeTPU Detection engine along with the
    # tflite model to detect any faces. If any faces are detected the image
    # will be updated with boxes drawn around each identified face.
    # Note that frame_as_image is modified in place.
    def identify_faces_in_frame(self, rgb_array, detect_only=False):
        ''' function identify_faces_in_frame

        Detect any faces that are present in the given image.
        For each detected face, call FaceRecognizer to try to
        identify it, then draw a box around the face including
        an identification label (or "Unknown" if the face was
        not identified).

        Args:
            rgb_array (numpy.ndarray): The frame that we should try to detect
                            faces in.

        Returns:
            A PIL Image with an enclosing red box and label for each face
            detected in the given frame
        '''

        # record start of main ML processing
        self.start_time_stamp = time.monotonic()

        # Delegate the face detection to the face detection engine
        detection_start_time = time.monotonic()
        detected_faces = self.face_detection_engine.detect_faces(rgb_array)
        detection_end_time = time.monotonic()
        if PRINT_PERFORMANCE_INFO:
            print("Face detection time: {:.3f}s".format(detection_end_time - detection_start_time))

        # convert to a PIL Image
        frame_as_image = Image.fromarray(rgb_array)
        # draw a box drawn around each face detected
        self.draw_face_boxes(frame_as_image, detected_faces, detect_only)

        return frame_as_image

    # draw boxes around each identified face in the image
    def draw_face_boxes(self, frame_as_image, detected_faces, detect_only=False):
        ''' function draw_face_boxes

        For each detected face, try to identify it, then draw a
        bounding box around the face and add a label.

        Args:
            frame_as_image (PIL Image): Original full image containing the faces
            detected_faces (array of Tuples): bounding box for each detected face
                that includes: top left corner position (x,y) as well as width and height

        Returns:
            A PIL Image with the original image overlayed with the bounding boxes and labels
            for each detected face
        '''

        # We need these local variables, so turn off Lint's complaint
        # pylint: disable=too-many-locals

        draw = ImageDraw.Draw(frame_as_image)
        for face in detected_faces:
            # get the top-left and lower-right coordinates of the bounding box for the face
            x_1, y_1, width, height = tuple(face)
            x_2 = x_1 + width
            y_2 = y_1 + height

            # generate a cropped image of the face with proper size to pass to the recognizer
            cropped_face = frame_as_image.crop((x_1, y_1, x_2, y_2))
            cropped_face = cropped_face.resize(self.embedding_image_dimensions)

            # This can be uncommented and used to see exactly what the cropped image looks like
            # cropped_face.save("cropped_face.jpg")

            # bounding box around face
            draw.rectangle(((x_1, y_1), (x_2, y_2)), outline='red')

            if not detect_only:
                # run the face recognizer on the image here
                name_for_face, process_time = self.face_recognizer.get_name_for_face(cropped_face)

                if name_for_face == "":
                    name_for_face = "Unknown"

                # label the face
                face_label = name_for_face + ' {:.3f}s'.format(process_time)
                face_label_width = self.face_label_font.getsize(face_label)
                face_label_start_x = x_1 + (x_2-x_1)/2 - face_label_width[0]/2
                draw.text((face_label_start_x, y_2 + 5), face_label, fill='red', font=self.face_label_font)

        # label the current FPS as well
        annotate_text = 'Processing time: {:.3f}s'.format(time.monotonic() - self.start_time_stamp)
        draw.text((175, 10), annotate_text, fill="red", font=self.fps_font)
Example #26
 def __init__(self, dataset):
     self.detector = FaceDetector()
     self.recognizer = FaceRecognizer(dataset)
     self.dataset = dataset
Example #27
import os
import numpy as np
import cv2
from face_finder import FaceFinder, get_region_of_interest
from face_recognizer import FaceRecognizer

face_finder = FaceFinder()
recognizer = FaceRecognizer()
dataset_dir = os.path.join('dataset', 'train')

x_train = []
y_labels = []
label = 1


def recursive_append(path, x_train, y_labels, current_label):
    for file in os.listdir(path):
        filepath = os.path.join(path, file)
        if os.path.isdir(filepath):
            recursive_append(filepath, x_train, y_labels, current_label)
        elif file.endswith('png') or file.endswith('jpg'):
            image_array = cv2.imread(filepath)
            image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
            # image_array = cv2.equalizeHist(image_array)
            # normalized_image_array = np.zeros(image_array.shape)
            # normalized_image_array = cv2.normalize(image_array, normalized_image_array)
            # while True:
            #     cv2.imshow('image', image_array)
            #     if cv2.waitKey(20) & 0xFF == ord('q'):
            #         break
            faces = face_finder.find(image_array)
Example #28
def create_faces_image():
    images = cv2.imread(load_face_images())
    face_recognize = FaceRecognizer(images)
    face_recognize.face_recoginaze()
Example #29
class ProcessFrame:
    def __init__(self, args: Dict):
        # todo proper handling of self.modes
        self.ie = IECore()
        self.modes = self.__determine_processing_mode(args)
        net_face_detect = net_landmarks_detect = net_recognize_face = None

        if not self.modes['detect']:
            raise ValueError('detection model undefined')
        # load networks from file
        net_face_detect = self.__prepare_network(args['detection_model'])
        # put it to corresponding class
        # self.face_locator = FaceLocator(net_face_detect, args['detection_model_threshold'])
        self.face_locator = FaceLocator(net_face_detect,
                                        args['detection_model_threshold'],
                                        NetworkType(args['detection_model']))
        # setup device plugins
        if next(iter(args['device'])) == 'CPU':
            # CPU
            self.ie.set_config(config={
                "CPU_THROUGHPUT_STREAMS": "1",
                "CPU_THREADS_NUM": "8",
            },
                               device_name='CPU')
        elif next(iter(args['device'])) == 'GPU':
            # GPU
            pass
            self.ie.set_config(config={"GPU_THROUGHPUT_STREAMS": "1"},
                               device_name='GPU')
        elif next(iter(args['device'])) == 'MYRIAD':
            pass
        # load to device for inferencing
        self.face_locator.deploy_network(next(iter(args['device'])), self.ie)

        if self.modes['landmark']:
            net_landmarks_detect = self.__prepare_network(
                args['landmarks_model'])
            self.landmarks_locator = LandmarksLocator(net_landmarks_detect)
            self.landmarks_locator.deploy_network(next(iter(args['device'])),
                                                  self.ie)

        if self.modes['recognize']:
            net_recognize_face = self.__prepare_network(
                args['recognition_model'])
            self.face_recognizer = FaceRecognizer(net_recognize_face)
            self.face_recognizer.deploy_network(next(iter(args['device'])),
                                                self.ie)

        # todo other models or load separately

    @staticmethod
    def __determine_processing_mode(args: Dict) -> Dict[str, bool]:
        ret = {}
        if args['detection_model']:
            ret['detect'] = True
        else:
            ret['detect'] = False

        if args['landmarks_model']:
            ret['landmark'] = True
        else:
            ret['landmark'] = False

        if args['recognition_model']:
            ret['recognize'] = True
        else:
            ret['recognize'] = False
        return ret

    def __prepare_network(self, model_path: str) -> IENetwork:
        model_path = os.path.abspath(model_path)
        model = self.ie.read_network(model=model_path,
                                     weights=os.path.splitext(model_path)[0] +
                                     ".bin")
        return model

    def process_frame(
        self, frame: np.ndarray
    ) -> List[
            Union[List[FaceLocator.FacePosition],
                  List[LandmarksLocator.FaceLandmarks],
                  List[FaceRecognizer.FaceIdentity]]]:  # todo union with None
        faces_landmarks = faces_identities = None
        face_positions = self.face_locator.get_face_positions(frame)
        if self.modes['landmark']:
            faces_landmarks = self.landmarks_locator.get_landmarks(
                frame, face_positions)
        if self.modes['recognize']:
            faces_identities = self.face_recognizer.get_identities(
                frame, face_positions, faces_landmarks)
        return [face_positions, faces_landmarks, faces_identities]
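A hedged construction sketch for ProcessFrame. The model file names are hypothetical placeholders for OpenVINO IR files, and the device is passed as a list so that next(iter(...)) yields the full device name rather than its first character:

import cv2

args = {
    'detection_model': 'face-detection.xml',            # hypothetical IR path
    'landmarks_model': 'landmarks-regression.xml',      # hypothetical IR path
    'recognition_model': 'face-reidentification.xml',   # hypothetical IR path
    'detection_model_threshold': 0.5,
    'device': ['CPU'],
}
processor = ProcessFrame(args)

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    positions, landmarks, identities = processor.process_frame(frame)
cap.release()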
Example #30
class ReceptionRobot():
    # N_EPOCH = 50
    N_EPOCH = 100
    BATCH_SIZE = 96
    # RECOGNITION_IMAGE_SIZE = (32, 32)
    RECOGNITION_IMAGE_SIZE = (64, 64)
    SAVE_IMAGE_SIZE = (64, 64)
    PERSON_IMAGES_NUM = 10
    PLATFORM = Platform.macos.value

    def __init__(self):
        self.load_face_recognition_model()
        self.app = QApplication(sys.argv)

    def run(self):
        """ Start face recognize system.
        """
        self.say("おはようございます")
        self.capture()

    def capture(self):
        """ Capture video. Wait to detect face.
        """
        def loop_func(**kwargs):
            current_persons = self.recognize(kwargs.get('face_imgs'))
            kwargs.get('past_recognized_persons').append(current_persons)
            if len(kwargs.get('past_recognized_persons')) > 3:
                kwargs.get('past_recognized_persons').pop(0)
            # continuous person filtering.
            person_ids = Person.objects().distinct(field='_id')
            for persons in kwargs.get('past_recognized_persons')[-3:]:
                tmp_person_ids = []
                for person in persons:
                    tmp_person_ids.append(person._id)
                person_ids = list(set(person_ids) & set(tmp_person_ids))
            for person_id in person_ids:
                self.greet(Person.objects(_id=person_id).first())

        face_capture.FaceCapture.capture(
            loop_func=loop_func,
            continue_condition_func=lambda **kwargs: False,
            break_condition_func=lambda **kwargs: False,
        )

    def recognize(self, imgs: [np.array((None, None, 3))]):
        """ Recognize detected face.
        Args:
            img: numpy.array(), (?, ?, 3)
                unknown size of rgb image.
        Returns:
            persons: [person.Person()]
        """
        person_id_indexes = \
            self.face_recognizer.recognize(imgs)
        persons = []
        for person_id_index in person_id_indexes:
            person_id = self.face_recognizer.person_ids[person_id_index]
            person = Person.objects(_id=person_id).first()
            persons.append(person)
        return persons

    def greet(self, person: Person):
        """ Greet to recognized person.
        """
        self.say("こんにちは。{}さん".format(person.nickname))

    @classmethod
    def say(cls, message, speaker="kyoko"):
        """ Speak with MacOSX talk application
        """
        # print(os.path.dirname(__file__))
        cls.print(message)
        if cls.PLATFORM == Platform.macos.value:
            cls.say_macos(message, speaker=speaker)
        elif cls.PLATFORM == Platform.linux.value:
            cls.say_linux(message, speaker=speaker)

    @classmethod
    def say_macos(cls, message, speaker="kyoko"):
        talk_application = 'default'
        # talk_application = 'open-jtalk'

        # Text to voice.

        voice_output_dir = os.path.dirname(__file__) + "/voices/"
        if talk_application == 'default':
            voice_filename = "{voice_output_dir}{message}.aiff".format(
                voice_output_dir=voice_output_dir, message=message
            )
            say_command = "say -v {speaker} {message} -o {voice_dir}{message}.aiff".format(
                speaker=speaker, message=message, voice_dir=voice_output_dir)

        elif talk_application == 'open-jtalk':
            dict_dir = '/usr/local/Cellar/open-jtalk/1.09/dic'
            voice_dir = '/usr/local/Cellar/open-jtalk/1.09/voice/mei' \
                        '/mei_normal.htsvoice'
            voice_filename = "{voice_outut_dir}{message}.wav".format(
                voice_outut_dir=voice_output_dir, message=message
            )
            say_command = 'open_jtalk -x {dict_dir} -m {voice_dir} ' \
                          '-ow {voice_filename} ' \
                          '{message}'.format(
                message=message, dict_dir=dict_dir, voice_dir=voice_dir,
                voice_filename=voice_filename)
            print(say_command)
        else:
            return False
        # Convert to robotic voice.
        pitch = 350
        tempo = 1.15
        sox_convert_command = "sox {voice_filename} " \
                              "{voice_dir}{message}.wav " \
                              "echo 0.8 0.8 5 0.7 " \
                              "echo 0.8 0.7 6 0.7 " \
                              "echo 0.8 0.7 10 0.7 " \
                              "echo 0.8 0.8 12 0.7 " \
                              "echo 0.8 0.88 30 0.7 " \
                              "pitch {pitch} tempo {tempo}".format(
            message=message, pitch=pitch, tempo=tempo,
            voice_dir=voice_output_dir, voice_filename=voice_filename,
        )
        play_command = "play {voice_dir}{message}.wav".format(
            message=message, voice_dir=voice_output_dir)
        # if not os.path.exists("{voice_dir}{message}.wav"):
        os.system(say_command)
        os.system(sox_convert_command)
        os.system(play_command)

    @classmethod
    def say_linux(cls, message, speaker="kyoko"):
        """ Linux platform say method.
        """

    @classmethod
    def print(cls, message):
        """ Print message to application interface.
        Args:
            message:
        Returns:
        """
        print(message)

    def load_face_recognition_model(self):
        # person.Person.objects().distinct('_id')
        # self.face_recognizer = FaceRecognizer.objects(
        #     n_epoch=self.N_EPOCH,
        #     batch_size=self.BATCH_SIZE,
        #     image_size=self.RECOGNITION_IMAGE_SIZE,
        #     person_ids=Person.ascendind_ids(),
        # ).first()
        # if self.face_recognizer is None:
        #     return None
        person_ids = [obj._id for obj in Person.objects]
        self.face_recognizer = FaceRecognizer(
            n_epoch=self.N_EPOCH,
            batch_size=self.BATCH_SIZE,
            image_size=self.RECOGNITION_IMAGE_SIZE,
            person_ids=person_ids,
            person_num=len(person_ids)
        )
        self.face_recognizer.generate_filename()
        print(self.face_recognizer.filename)
        self.face_recognition_model = \
            self.face_recognizer.load_model()
        return self.face_recognition_model

    @classmethod
    def save_person_capture(cls, max_faces=100):
        """ save person with video capture.
        """
        nickname, last_name, first_name, company = \
            cls.ask_personality()
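        # The prompts below say, in Japanese, that a ~5-second video will be
        # taken so the robot can memorize the person's face.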
        cls.say("{}さんのことを覚えたいので5秒ほどビデオを撮りますね。".format(nickname))
        cls.say("はい。とりまーす!")
        def face_yield(face, faces):
            if len(faces) > cls.PERSON_IMAGES_NUM:
                yield face
            yield face

        all_face_imgs = face_capture.FaceCapture.capture(
            loop_func=lambda **kwargs: True,
            continue_condition_func=lambda **kwargs: len(
            kwargs.get('face_positions')) > 1,
            break_condition_func=lambda **kwargs:
                len(kwargs.get('all_face_imgs')) > cls.PERSON_IMAGES_NUM)
        person_obj = Person(
            nickname=nickname,
            last_name=last_name,
            first_name=first_name,
            company=company,
        )
        person_obj.set_face_imgs(all_face_imgs, cls.SAVE_IMAGE_SIZE)
        cls.say("今{}さんのこと覚えてます。1分くらいかかるかもしれません。".format(
            nickname))
        # cls.say("たぶん大丈夫ですが、僕はロボットなのでデータの保存に失敗すると"
        #         "全て忘れてしまうので...")
        person_obj.save()
        cls.say("{}さんのことバッチリ覚えました!またお会いしましょう。".format(
            nickname))
        # cls.say("あ、でも一回寝ないと顔で思い出せない作りになっているので、"
        #         "また明日以降あったときはご挨拶させてもらいますね。")

    @classmethod
    def ask_personality(cls):
        app = QApplication(sys.argv)
        window = Window(title='Opus',
                        labels=['ニックネーム', '姓', '名', '会社名'])
        # cls.say("はじめまして。私は人工知能のオーパスです。")
        # cls.say("あなたのことを知りたいので名前と会社名を教えてください。"
        #         "ニックネームには、私に呼ばれたい名前を入れてください。")
        cls.say("名前を教えてね。")
        app.exit(app.exec_())

        # cls.say("私に呼ばれたい名前を入れてください")
        # nickname = input()
        # cls.say("{} さん ですね。よろしくお願いします。".format(nickname))
        # cls.say("あと苗字と名前、所属会社を教えてください。")
        # cls.say("まずは苗字をお願いします。")
        # last_name = input()
        # cls.say("次に名前")
        # first_name = input()
        # cls.say("最後に所属会社をお願いします。")
        # company = input()
        # cls.say("{}の{} {}さんですね。登録しておきます。".format(
        #     company, last_name, first_name))
        # return nickname, last_name, first_name, company
        texts = window.texts
        return texts

    @classmethod
    def memorize_face(cls):
        """ Learn the face model. """
        # FaceRecognizer.fit(n_epoch=5)
        FaceRecognizer.fit(
            n_epoch=cls.N_EPOCH, batch_size=cls.BATCH_SIZE,
            image_size=cls.RECOGNITION_IMAGE_SIZE,)
Example #31
class FaceFinder(Process, Broadcaster, Listener):
    """Face finder process"""
    def __init__(self):
        Process.__init__(self)
        Broadcaster.__init__(self, 'face_finder')
        Listener.__init__(self)
        self.detector = cv2.CascadeClassifier(
            "haarcascade_frontalface_default.xml")

    def run(self):
        import dlib
        from people import People
        from gender_recognizer_tf import GenderRecognizer
        from face_recognizer import FaceRecognizer
        self.gender_recognizer = GenderRecognizer()
        self.p = People("./known_people")
        self.face_recognizer = FaceRecognizer(
            self.p.people
        )  # This is not ideal; we should pass only self.p as an object.
        self.predictor = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")
        self.fa = FaceAligner(self.predictor, desiredFaceWidth=160)
        self.state = IdleState()
        while True:
            label, data = self.recv()
            if label == 'video_feed':
                frame = data[0]
                face = None
                aligned_image, face, rect_nums, XY = self.face_extractor(frame)
                if face is None:
                    continue

                face_locations, face_names, percent = self.face_recognizer.recognize(
                    face)
                in_the_frame = self.__update_state(self.p.people, percent)

                if (self.state.__class__ == MatchState):
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    self.broadcast(["Match", face, in_the_frame, gender, age])
                else:
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    if path is None:
                        continue
                    print("FaceFinder path = " + path)
                    self.broadcast(["NoMatch", path])

    def face_extractor(self, frame):
        import dlib
        image = imutils.resize(frame, width=256)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = self.detector.detectMultiScale(gray,
                                               scaleFactor=1.1,
                                               minNeighbors=5,
                                               minSize=(15, 15),
                                               flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) > 0:
            (x, y, w, h) = rects[0]
            rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            face = self.fa.align(image, gray, rect)
            return [face], face, len(rects), [(x, y)]
        return [], None, 0, []

    def __update_state(self, people, percent):
        if percent < 0.8:
            if self.state.__class__ != IdleState:
                self.state = IdleState()
            return None
        else:
            in_the_frame = [p for p in people if p.in_the_frame]

            if len(in_the_frame) == 0:
                if percent > 0.8:
                    if self.state.__class__ != SearchingState:
                        self.state = SearchingState()
            elif in_the_frame[0].id is None:
                if self.state.__class__ != NoMatchState:
                    self.state = NoMatchState()
            else:
                if self.state.__class__ != MatchState:
                    self.state = MatchState()
            return in_the_frame
Example #32
def train():
    opt = opts.parse_opt()
    opt.input_data = "MNIST"

    img_size = (opt.img_dim, opt.img_dim)
    print('Dimension of images:', img_size)
    train_data, train_label, id_gender = \
        get_30_people_chunk(opt.image_path, 1, gender_meta=True, img_size=img_size)
    test_data, test_label = get_30_people_chunk(opt.image_path,
                                                2,
                                                img_size=img_size)
    names = get_people_names(opt.image_path, 30)

    if opt.balance_data:
        ratio = opt.balance_ratio
        print('Balancing dataset with ratio %f' % ratio)
        train_data, train_label = balance_dataset(train_data, train_label)
        test_data, test_label = balance_dataset(test_data, test_label)

    if opt.balance_gender:
        print(train_data.shape, train_label.shape)
        print(test_data.shape, test_label.shape)
        print('Balancing genders')
        selected_people = []
        for i in range(id_gender.shape[1]):
            indices, = np.where(id_gender[:, i] == 1)
            selected_people.append(np.random.choice(indices, 5, replace=False))
        selected_people = np.concatenate(selected_people)

        print('Selected people are:')
        print(np.array(names)[selected_people])

        selected_imgs = train_label[:, selected_people].sum(axis=1) != 0
        train_data = train_data[selected_imgs, :]
        train_label = train_label[selected_imgs, :]

        selected_imgs = test_label[:, selected_people].sum(axis=1) != 0
        test_data = test_data[selected_imgs, :]
        test_label = test_label[selected_imgs, :]

    print('Shape of data:')
    print('\tTraining data: ' + str(train_data.shape))
    print('\tTraining label: ' + str(train_label.shape))
    print('\tMax, Min Train: %.4f, %.4f' % (np.max(train_data),
                                            np.min(train_data)))
    print('\tTest data: ' + str(test_data.shape))
    print('\tTest label: ' + str(test_label.shape))
    print('\tMax, Min Test: %.4f, %.4f' % (np.max(test_data),
                                           np.min(test_data)))

    x_dim = train_data.shape[1]
    y_dim = train_label.shape[1]

    opt.input_c_dim = 3
    opt.output_c_dim = 3
    opt.input_dim = x_dim
    opt.label_dim = y_dim
    input_shape = (x_dim, x_dim, opt.input_c_dim)

    batch_size = opt.batch_size
    print('Batch size: %d' % batch_size)

    NUM_REPR = 5
    NUM_SAMPLES_EACH = int(batch_size / NUM_REPR / 2)
    output_samples = get_output_samples(train_data, train_label, id_gender,
                                        NUM_REPR, NUM_SAMPLES_EACH)

    NUM_THREADS = 2
    tf_config = tf.ConfigProto()
    tf_config.intra_op_parallelism_threads = NUM_THREADS
    tf_config.gpu_options.allow_growth = True

    iteration_time = []
    with tf.Session(config=tf_config) as sess:

        id_model_path = '%s_%d_id_0' % (opt.lfw_base_path, x_dim)
        print('\tRetrieving evil model from "%s"' % id_model_path)
        evil_model = FaceRecognizer(id_model_path, train_label.shape[1],
                                    input_shape, opt.input_c_dim)

        gender_model_path = '%s_%d_gender_0' % (opt.lfw_base_path, x_dim)
        print('\tRetrieving good model from "%s"' % gender_model_path)
        good_model = FaceRecognizer(gender_model_path, 2, input_shape,
                                    opt.input_c_dim)
        model = advGAN(good_model, evil_model, opt, sess, mnist=False)

        iteration = 0
        if opt.resnet_gen:
            generator_mode = 'ResNet'
        else:
            generator_mode = 'Regular'
        summary_dir = "logs/LFW/g_%d_ld_%d_gl_%d_L2_%.2f_lr_%.4f_%s/" % (
            opt.G_lambda, opt.ld, opt.good_loss_coeff, opt.L2_lambda,
            opt.learning_rate, generator_mode)
        if not os.path.isdir(summary_dir):
            print('Creating directory %s for logs.' % summary_dir)
            os.mkdir(summary_dir)
        # else:
        #     print 'Removing all files in %s' % (summary_dir + '*')
        #     shutil.rmtree(summary_dir)

        writer = tf.summary.FileWriter(summary_dir, sess.graph)
        loader = Dataset2(train_data, train_label)
        print('Training data loaded.')

        print('Maximum iterations: %d' % opt.max_iteration)
        max_acc_diff = -1.0
        while iteration < opt.max_iteration:
            # this function returns (data, label, np.array(target)).
            feed_data, evil_labels, real_data = loader.next_batch(
                batch_size, negative=False)
            good_labels = id_gender[np.argmax(evil_labels, axis=1)]

            feed = {
                model.source: feed_data,
                model.target: real_data,
                model.good_labels: good_labels,
                model.evil_labels: evil_labels
            }

            # Training G once.
            summary_str, G_loss, _ = sess.run(
                [model.total_loss_merge_sum, model.g_loss, model.G_train_op],
                feed)
            writer.add_summary(summary_str, iteration)

            # Training G twice.
            summary_str, G_loss, gan_loss, hinge_loss, l1_loss, l2_loss, \
                good_fn_loss, evil_fn_loss, adv_loss, total_loss, _ = sess.run([
                    model.total_loss_merge_sum,
                    model.g_loss,
                    model.gan_loss,
                    model.hinge_loss,
                    model.l1_loss,
                    model.l2_loss,
                    model.good_fn_loss,
                    model.evil_fn_loss,
                    model.adv_loss,
                    model.total_loss,
                    model.G_train_op], feed)
            writer.add_summary(summary_str, iteration)

            # Training D.
            summary_str, D_loss, _ = \
                sess.run([model.total_loss_merge_sum, model.d_loss, model.D_pre_train_op], feed)
            writer.add_summary(summary_str, iteration)

            if iteration % opt.losses_log_every == 0:
                print "iteration: ", iteration
                print '\tD: %.4f, G: %.4f\n\thinge(%.2f): %.4f, L1(%.2f): %.4f, L2(%.2f): %.4f' % (
                    D_loss, G_loss, opt.H_lambda, hinge_loss, opt.L1_lambda,
                    l1_loss, opt.L2_lambda, l2_loss)
                print '\t\tGAN total loss: %.4f' % gan_loss
                print '\tGood: %.4f, Evil: %.4f' % (good_fn_loss, evil_fn_loss)
                print '\tAdv: %.4f, Total: %.4f' % (adv_loss, total_loss)

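                # Run the generator over the full test set in batch_size windows;
                # the last window is shifted back to end at the final sample, so
                # only its previously uncovered tail is kept.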
                new_test_data = []
                new_pred_data = []
                head = 0
                last_batch = False
                while head < test_data.shape[0]:
                    if head + batch_size <= test_data.shape[0]:
                        tail = head + batch_size
                    else:
                        tail = test_data.shape[0]
                        head = test_data.shape[0] - batch_size
                        last_batch = True
                    cur_data, pred_data = sess.run(
                        [model.fake_images_output, model.prediction_ready],
                        {model.source: test_data[head:tail, :]})

                    if last_batch:
                        new_test_data.append(
                            cur_data[-(test_data.shape[0] % batch_size):, :])
                        new_pred_data.append(
                            pred_data[-(test_data.shape[0] % batch_size):, :])
                    else:
                        new_test_data.append(cur_data)
                        new_pred_data.append(pred_data)
                    head += batch_size
                new_test_data = np.concatenate(new_test_data)
                new_pred_data = np.concatenate(new_pred_data)

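                # Score both classifiers on the perturbed test set
                # (prediction_ready appears to be the generator output
                # preprocessed for the classifiers).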
                good_pred = np.argmax(
                    model.good_model.model.predict(new_pred_data), axis=1)
                evil_pred = np.argmax(
                    model.evil_model.model.predict(new_pred_data), axis=1)
                evil_true = np.argmax(test_label, axis=1)
                good_true = np.argmax(id_gender[evil_true, :], axis=1)

                good_accuracy = accuracy_score(good_true, good_pred)
                evil_accuracy = accuracy_score(evil_true, evil_pred)
                total_good_confusion = confusion_matrix(good_true, good_pred)
                total_evil_confusion = confusion_matrix(
                    evil_true, evil_pred, labels=range(opt.evil_label_num))

                print '\tGood Accuracy: %.4f, Evil Accuracy: %.4f' % (
                    good_accuracy, evil_accuracy)
                print '\tAccuracy diff: %f' % (good_accuracy - evil_accuracy)
                print 'Good confusion matrix:'
                print total_good_confusion
                evil_misclass = total_evil_confusion.sum(
                    axis=0) - np.diag(total_evil_confusion)
                evil_idxs = np.argsort(-evil_misclass)
                print 'Top 3 Misclassifications:'
                print np.array(names)[evil_idxs][:3]
                print evil_misclass[evil_idxs][:3]
                evil_tp = np.diag(total_evil_confusion)
                evil_idxs = np.argsort(-evil_tp)
                print 'Top 3 True classifications:'
                print np.array(names)[evil_idxs][:3]
                print evil_tp[evil_idxs][:3]

                # print 'Selected people are:'
                # print names[evil_idxs].tolist()
                # print evil_tp
                # print total_evil_confusion
                # print evil_idxs

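                # Snapshot grid: perturbed samples, the added noise, and the
                # originals, concatenated side by side.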
                fake_samples, fake_noise = sess.run(
                    [model.fake_images_output, model.fake_noise_output],
                    {model.source: output_samples})

                fakes = merge(fake_samples, [2 * NUM_REPR, NUM_SAMPLES_EACH])
                original = merge(output_samples,
                                 [2 * NUM_REPR, NUM_SAMPLES_EACH])
                noise = merge(fake_noise, [2 * NUM_REPR, NUM_SAMPLES_EACH])
                final_image = np.concatenate([fakes, noise, original], axis=1)

                scipy_imsave('snapshot_%d.png' % iteration, final_image)

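                # Checkpoint the perturbed datasets whenever the good-evil
                # accuracy gap exceeds 0.5 and sets a new best.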
                if (good_accuracy - evil_accuracy) > max(0.5, max_acc_diff):
                    print '\tSaving new training data at accuracy diff: %.4f' % (
                        good_accuracy - evil_accuracy),
                    max_acc_diff = good_accuracy - evil_accuracy

                    # other_good = FaceRecognizer('%s_%d_gender_0' % (opt.lfw_base_path, x_dim),
                    #                             2, input_shape, opt.input_c_dim)

                    # other_pred = np.argmax(other_good.model.predict(new_pred_data), axis=1)
                    # print 'Other Good accuracy: %.4f' % accuracy_score(good_true, other_pred)

                    # other_pred = np.argmax(other_good.model.predict(
                    #     preprocess_images(new_test_data * 255.0)), axis=1)
                    #     print '\tTest data preprocessed accuracy: %.4f' % \
                    #     accuracy_score(good_true, other_pred)

                    # other_evil = FaceRecognizer('%s_%d_id_0' % (opt.lfw_base_path, x_dim),
                    #                             34, input_shape, opt.input_c_dim)
                    # other_pred = np.argmax(other_evil.model.predict(new_pred_data), axis=1)
                    # print 'Other Evil accuracy: %.4f' % accuracy_score(evil_true, other_pred)
                    # other_pred = np.argmax(other_evil.model.predict(
                    #     preprocess_images(new_test_data * 255.0)), axis=1)
                    #     print '\tTest data preprocessed accuracy: %.4f' % \
                    #     accuracy_score(evil_true, other_pred)

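                    # Same shifted-window generator pass, now over the training set.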
                    new_train_data = []
                    head = 0
                    last_batch = False
                    while head < train_data.shape[0]:
                        if head + batch_size <= train_data.shape[0]:
                            tail = head + batch_size
                        else:
                            tail = train_data.shape[0]
                            head = train_data.shape[0] - batch_size
                            last_batch = True
                        cur_data = sess.run(
                            model.fake_images_output,
                            {model.source: train_data[head:tail, :]})

                        if last_batch:
                            new_train_data.append(
                                cur_data[-(train_data.shape[0] %
                                           batch_size):, :])
                        else:
                            new_train_data.append(cur_data)
                        head += batch_size
                    new_train_data = np.concatenate(new_train_data)

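                    # Persist perturbed and original splits, labels, and the
                    # identity-to-gender table in one compressed archive.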
                    np.savez_compressed(opt.output_path,
                                        train_data=new_train_data,
                                        org_train_data=train_data,
                                        train_label=train_label,
                                        test_data=new_test_data,
                                        org_test_data=test_data,
                                        test_label=test_label,
                                        id_gender=id_gender)
                    print '\t[DONE]'

            iteration += 1
Example #33
0
 def __init__(self):
     self.face_finder = FaceFinder()
     self.recognizer = FaceRecognizer()
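     # Load previously trained recognizer weights saved by an earlier run.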
     self.recognizer.load('trainer.yml')
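     # Fires when similarity exceeds 80; the first argument (20) is presumably
     # a count or window size handled inside Trigger, which is not shown here.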
     self.security_trigger = Trigger(20, lambda similarity: similarity > 80)
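
A side note on the batching idiom the long training script above uses for both
its test-set and train-set generator passes: the final window is shifted back
so it ends exactly at the last sample, and only the tail not covered by earlier
windows is kept. A minimal, self-contained sketch of that pattern, where the
names batched_apply and predict_fn are illustrative, not from the original code:

import numpy as np

def batched_apply(predict_fn, data, batch_size):
    # Apply predict_fn over fixed-size windows. The last window is shifted
    # back to end at the final sample; only its uncovered tail is appended,
    # so every sample appears exactly once in the output.
    out = []
    head = 0
    while head < data.shape[0]:
        if head + batch_size <= data.shape[0]:
            out.append(predict_fn(data[head:head + batch_size]))
        else:
            remainder = data.shape[0] % batch_size
            out.append(predict_fn(data[-batch_size:])[-remainder:])
        head += batch_size
    return np.concatenate(out)

# Usage with an identity "model": every row comes back exactly once.
x = np.arange(10).reshape(10, 1)
assert (batched_apply(lambda b: b, x, batch_size=4) == x).all()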