Example #1
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.bridge = CvBridge()
        self.visualization = True

        self.image_msg = None
        self.pub_detections = rospy.Publisher("~image_face",
                                              CompressedImage,
                                              queue_size=1)
        self.recognizer = FaceRecognizer(scale=3)

        rospy.Service('~detect_face_locations', GetFaceDetections,
                      self.cbDetectFaceLocations)
        rospy.Service('~detect_face_labels', GetFaceDetections,
                      self.cbDetectFaceLabels)
        rospy.Service('~list_face_labels', GetStrings, self.cbListFaceLabels)
        rospy.Service('~add_face_label', SetString, self.cbAddFaceLabel)
        rospy.Service('~remove_face_label', SetString, self.cbRemoveFaceLabel)
        # self.sub_image = rospy.Subscriber("~image_raw", Image, self.cbImg , queue_size=1)
        self.sub_image = rospy.Subscriber("~image_raw/compressed",
                                          CompressedImage,
                                          self.cbImg,
                                          queue_size=1)

        # rospy.loginfo("[%s] wait_for_service : camera_get_frame..." % (self.node_name))
        # rospy.wait_for_service('~camera_get_frame')
        # self.get_frame = rospy.ServiceProxy('~camera_get_frame', GetFrame)
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Example #2
def test_gender_recognizer(args):
    print 'Gender Recognizer:'
    input_shape = (args.image_size, args.image_size, args.num_channels)
    print 'Input shape:', input_shape
    X, y = get_30_people_chunk(args.image_path, 0, gender_label=True)

    print 'images shape:', X.shape
    print 'labels shape:', y.shape

    identity_recognizer = FaceRecognizer(args.model_path + '_gender',
                                         y.shape[1], input_shape,
                                         args.num_channels)

    identity_recognizer.model.compile(loss=TEST_SOFTMAX,
                                      optimizer=TEST_SGD,
                                      metrics=['accuracy'])

    y_pred = identity_recognizer.model.predict(X)
    y_pred_ct = np.argmax(y_pred, axis=1)
    y_true_ct = np.argmax(y, axis=1)
    acc = np.mean(y_pred_ct == y_true_ct)  # np.mean avoids Python 2 integer division

    print '\tAccuracy on training data: %.4f' % acc

    X_val, y_val = get_30_people_chunk(args.image_path, 1, gender_label=True)
    y_pred = identity_recognizer.model.predict(X_val)
    y_pred_ct = np.argmax(y_pred, axis=1)
    y_true_ct = np.argmax(y_val, axis=1)
    acc = np.mean(y_pred_ct == y_true_ct)  # np.mean avoids Python 2 integer division

    print '\tAccuracy on validation data: %.4f' % acc
Example #3
def time_fr(image_folder=None):
    embedding_file = 'embeddings.npy'
    if os.path.exists(embedding_file):
        os.remove(embedding_file)

    face_recog = FaceRecognizer(model_path='model/20180402-114759')
    if image_folder is None:
        image_path = 'faces.jpg'
        image_paths = [image_path for i in range(10)]
    else:
        image_paths = os.listdir(image_folder)
        image_paths = [os.path.join(image_folder, p) for p in image_paths if p.endswith('jpg')]

    for i, image_path in enumerate(image_paths):
        print('running: %d'%i)
        image = face_recog.read_image_sp(image_path)
        image = resize_image(image)
        det_start = time.time()
        bboxes, scores, keypoints = face_recog.detect_faces_and_keypoints(image)
        det_stop = time.time()
        print('detection time = ', det_stop - det_start)

        if len(bboxes) == 0:
            print('no face detected')
        else:
            rec_start = time.time()
            face_patches = face_recog.prepare_image_patches(image, bboxes, 160, 20)
            embeddings = face_recog.extract_embedding(face_patches)
            rec_stop = time.time()
            print('recognition time = ', rec_stop - rec_start)
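For reference, a minimal invocation of the benchmark above; the folder name is a placeholder:

time_fr(image_folder='test_images')  # or time_fr() to time faces.jpg ten times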
Example #4
def main(face_recognition_method: str = None):
    webcam = cv.VideoCapture(0)

    data_aquisition = DataAquisition(dataset_dir='images/training/',
                                     resources_dir='resources/')

    known_people = data_aquisition.create_dataset(webcam)

    if len(known_people) == 1:
        print('Need at least one known face for recognition')
        return

    print('Training model...')
    data_aquisition.train(face_recognition_method)

    print('Initializing face recognizer...')
    face_recognizer = FaceRecognizer(resources_dir='resources/',
                                     recognizer_name=face_recognition_method)

    while True:
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

        _, c_frame = webcam.read()

        c_frame, face_locations, labels, factors = face_recognizer.predict(
            c_frame, known_people)

        cv.imshow('webcam', c_frame)

    webcam.release()
    cv.destroyAllWindows()
Example #5
def history_recollection():
    history = FaceRecognizer(user_interface)
    history.build_imagecsv()
    user_number = history.RecognizeFace()
    user_name = history.names[user_number]

    if user_name is None:
        #print "did not recognize user ", user_name
        chatbot_response = "I don't think we've met before, what's your name?"
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", "Unknown")), "Primary Topics: ")
        user_interface.render()
        text_to_speech(chatbot_response)
        user_name = speech.recognize_speech()
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", user_name)), "Primary Topics: ")
        user_interface.render()
        #print "Name entered as: ", user_name
        history.retrain(user_name)

    else:
        #print "recognized user ", user_name
        chatbot_response = "It's good to see you again, " + user_name
        user_interface.update_sprites(chatbot_response, " ".join(("Emotion: ", meeting_emotion)), " ".join(("User: ", user_name)), "Primary Topics: ")
        user_interface.render()
        text_to_speech(chatbot_response)

    history.exit()
    return user_name
Example #6
 def initialize(self):
     if self.network_name == 'YOLO':
         self.network = YOLO_TF()
     elif self.network_name == 'SSD-brainlab':
         self.brainlab_args = UpdatePredictConfiguration()
         assert self.brainlab_args.batch_size == 1, 'Batch size must be 1'
         self.network = BrainLabNet.BrainLabNet(self.brainlab_args,
                                                'interactive')
         self.network.start_interactive_session(self.brainlab_args)
         self.faceRecognizer = FaceRecognizer()
     elif self.network_name == 'SSD-mobilenet-face':
         self.network = ssd_mobilenet_face_wrapper.ssd_mobilenet_face(
             self.threshold)
         self.faceRecognizer = FaceRecognizer()
     else:
         raise Exception('Network name not recognized')
Example #7
 def initialize(self):
     if self.network_name == 'YOLO':
         self.network = YOLO_TF()
     elif self.network_name == 'SSD-mobilenet-face':
         self.network = ssd_mobilenet_face_wrapper.ssd_mobilenet_face(
             self.threshold)
         self.faceRecognizer = FaceRecognizer()
     else:
         raise Exception('Network name not recognized')
Example #8
 def run_animation(self):
     """
     Run the animation of the face
     :return:
     """
     self.runText.set("Press 'Esc' to stop")
     self.rc = FaceRecognizer(self.root, self.canvas, self.runText,
                              self.runBtn, self.chatVar)
     self.rc.start()
     self.runText.set("Start Animation")
Example #9
    def __init__(self, url, faces_path, tolerance):
        self.url = url

        # load stream video
        self.camera = cv2.VideoCapture(self.url)
        if not self.camera.isOpened():
            print("Error: Failed to open the video source.")

        self.video_frame = self.camera.get(cv2.CAP_PROP_FRAME_COUNT)
        self.video_fps = self.camera.get(cv2.CAP_PROP_FPS)
        self.image_x = int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.image_y = int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        self.face_recognizer = FaceRecognizer(faces_path, tolerance)
Example #10
    def __init__(self, args: Dict):
        # todo proper handling of self.modes
        self.ie = IECore()
        self.modes = self.__determine_processing_mode(args)
        net_face_detect = net_landmarks_detect = net_recognize_face = None

        if not self.modes['detect']:
            raise ValueError('detection model undefined')
        # load networks from file
        net_face_detect = self.__prepare_network(args['detection_model'])
        # put it to corresponding class
        # self.face_locator = FaceLocator(net_face_detect, args['detection_model_threshold'])
        self.face_locator = FaceLocator(net_face_detect,
                                        args['detection_model_threshold'],
                                        NetworkType(args['detection_model']))
        # setup device plugins
        if next(iter(args['device'])) == 'CPU':
            # CPU
            self.ie.set_config(config={
                "CPU_THROUGHPUT_STREAMS": "1",
                "CPU_THREADS_NUM": "8",
            },
                               device_name='CPU')
        elif next(iter(args['device'])) == 'GPU':
            # GPU
            self.ie.set_config(config={"GPU_THROUGHPUT_STREAMS": "1"},
                               device_name='GPU')
        elif next(iter(args['device'])) == 'MYRIAD':
            pass
        # load to device for inferencing
        self.face_locator.deploy_network(next(iter(args['device'])), self.ie)

        if self.modes['landmark']:
            net_landmarks_detect = self.__prepare_network(
                args['landmarks_model'])
            self.landmarks_locator = LandmarksLocator(net_landmarks_detect)
            self.landmarks_locator.deploy_network(next(iter(args['device'])),
                                                  self.ie)

        if self.modes['recognize']:
            net_recognize_face = self.__prepare_network(
                args['recognition_model'])
            self.face_recognizer = FaceRecognizer(net_recognize_face)
            self.face_recognizer.deploy_network(next(iter(args['device'])),
                                                self.ie)
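A sketch of the `args` dict this constructor reads, inferred from the keys used above; the paths and values are placeholders:

args = {
    'detection_model': 'models/face-detection.xml',           # placeholder path
    'detection_model_threshold': 0.5,
    'landmarks_model': 'models/landmarks-regression.xml',     # placeholder path
    'recognition_model': 'models/face-reidentification.xml',  # placeholder path
    'device': ['CPU'],  # the code takes next(iter(...)), so any iterable of device names works
}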
Example #11
    def __init__(self):
        #observer = Observable()
        #motion_detector = MotionObserver('Motion Detector')
        #observer.register(motion_detector)
        self.camera = cv2.VideoCapture(0)
        time.sleep(0.5)
        self.motion_detector = MotionDetector()
        self.face_detector = FaceDetector()
        self.face_recognizer = FaceRecognizer()
        self.detectingstate = DetectingMotion(self)
        self.scanningstate = Scanning(self)

        self.facestate = FacialRecognition(self)
        self.greetingstate = GreetRoommate(self)
        self.waitingstate = WaitingForTask(self)
        self.servingstate = Serving(self)
        self.state = self.detectingstate
Example #12
	def __init__(self):
		"""Initialize the AppEngine

		Input:  None
		Output: Creates an instance of the AppEngine.
		"""

		# init QObject
		super().__init__()

		# create an instance of the database
		self.database = Database('../data/face_data.db')

		# ensure appropriate table is created
		create_sql = "CREATE TABLE IF NOT EXISTS faces (FaceName TEXT PRIMARY KEY," \
					 "FaceEncodings BLOB NOT NULL)"
		self.database.createDatabase(create_sql)

		# create an instance of the VideoStreamer
		self.videoStreamer = VideoStreamer()
		# connect the VideoStreamer's newPiFrame signal to the processNewFrame method
		self.videoStreamer.newPiFrame.connect(self.processNewFrame)

		# create an instance of the FaceRecognizer
		self.faceRecognizer = FaceRecognizer()

		# bool which determines if facial recognition is performed on each incoming frame from the Pi
		self.performFacialRecognition = False

		# current frame sent from the Pi, used for adding a new face
		self.currentFrame = None

		# current face name and encodings, used for adding a new face
		self.currentAddFaceName = None
		self.currentAddFaceEncodings = list()

		# create an instance of the AlertObserver
		self.alertObserver = AlertObserver()

		# bool determines if an alert has already been sent (only one is sent per application run)
		self.alertSent = False
Example #13
    def __init__(self, detection_method, embedding_model):
        ''' function constructor

        Constructor for FaceDetector

        Args:
            detection_method (DetectionMethod): Method to use for detection
            embedding_model (FaceEmbeddingModelEnum): The model to use for generating
                            embeddings for face images

        Returns:
            None
        '''

        # load face detection engine
        self.face_detection_engine = FaceDetectionEngine(detection_method)
        self.face_recognizer = FaceRecognizer(embedding_model)
        self.embedding_image_dimensions = get_image_dimensions_for_embedding_model(embedding_model)

        self.start_time_stamp = None
        self.fps_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)
        self.face_label_font = ImageFont.truetype(font="/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=20)
Example #14
    def run(self):
        import dlib
        from people import People
        from gender_recognizer_tf import GenderRecognizer
        from face_recognizer import FaceRecognizer
        self.gender_recognizer = GenderRecognizer()
        self.p = People("./known_people")
        self.face_recognizer = FaceRecognizer(
            self.p.people
        )  # not ideal: we should pass the whole People object (self.p) instead
        self.predictor = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")
        self.fa = FaceAligner(self.predictor, desiredFaceWidth=160)
        self.state = IdleState()
        while True:
            label, data = self.recv()
            if label == 'video_feed':
                frame = data[0]
                face = None
                aligned_image, face, rect_nums, XY = self.face_extractor(frame)
                if face is None:
                    continue

                face_locations, face_names, percent = self.face_recognizer.recognize(
                    face)
                in_the_frame = self.__update_state(self.p.people, percent)

                if (self.state.__class__ == MatchState):
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    self.broadcast(["Match", face, in_the_frame, gender, age])
                else:
                    path, gender, age = self.gender_recognizer.recognize(
                        aligned_image, face, rect_nums, XY)
                    if path is None:
                        continue
                    print("FaceFinder path = " + path)
                    self.broadcast(["NoMatch", path])
Example #15

ip_addresses = get_wifi_ip()
if len(ip_addresses) == 0:
    print("Unable to fetch the IP address. Hence running the server on localhost - 127.0.0.1")
    ip_addresses = tuple(["127.0.0.1"])

host = ip_addresses[0]
port = 9999

print("\n")
print("host = " + str(host))
print("port = " + str(port))

path_initializer.initialize_server_paths()
fr = FaceRecognizer(path_initializer.SERVER_KNOWN_FACES_FOLDER)
loaded_dataset_path = path_initializer.SERVER_MAIN_DATA_FOLDER + "/" + "trained_faces.pkl"
if not fr.load_from_file(loaded_dataset_path):
    print("\n\nPlease wait while the server is getting initialized ...\n")
    fr.train_on_folder_tree()
    fr.save_to_file(loaded_dataset_path)

print("\nPress \"ctrl+c\" to stop the server")

while True:
    print("\n")
    print("=====================")
    print("Server is running ...\n")
    s = socket(AF_INET, SOCK_DGRAM)
    s.bind((host, port))
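Since the loop above binds a UDP socket, a matching client sends datagrams to host:9999. This is a transport-level sketch only; the payload format the server expects is not shown, so b'ping' is a placeholder:

from socket import socket, AF_INET, SOCK_DGRAM

client = socket(AF_INET, SOCK_DGRAM)
client.sendto(b'ping', ('127.0.0.1', 9999))
client.close()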
Example #16
    def process_image(self, cv_image):
        if not self.enable:
            grey = cv.CreateImage(self.image_size, 8, 1)
            cv.CvtColor(cv_image, grey, cv.CV_BGR2GRAY)
            return grey

        self.frame_count = self.frame_count + 1
        haar = False
        # Use Haar detection if no faces are tracked yet, or every 5 frames
        if (self.use_haar_only or not self.detect_box.any_trackable_faces()
            ) and self.auto_face_tracking:
            self.detect_face(cv_image)
            haar = True
        elif self.frame_count % 5 == 0:
            self.detect_face(cv_image)
            haar = True
        """ Otherwise, track the face using Good Features to Track and
        Lucas-Kanade Optical Flow """
        if not self.use_haar_only:
            for fkey in self.detect_box.faces.keys():
                face = self.detect_box.faces[fkey]
                if not face.is_trackable():
                    continue

                # Let's try to identify the person by this face.
                # Note: face is of type FaceBox
                x1, y1 = face.pt1[0], face.pt1[1]
                x2, y2 = face.pt2[0], face.pt2[1]
                pad = 10  # Just to relax the face boundary

                # crop the face
                face_cv_image = cv_image[y1:y2 + pad, x1:x2 + pad]
                # DEBUG cv.SaveImage('/tmp/face.png', face_cv_image)
                bridge = CvBridge()
                r = FaceRecognizer().infer(np.asarray(face_cv_image))
                face.name = r[0][0]

                if not face.track_box or not self.is_rect_nonzero(
                        face.track_box):
                    face.features = []
                    face.update_box(face.face_box())
                track_box = self.track_lk(cv_image, face)
                if track_box and len(track_box) != 3:
                    face.update_box(track_box)
                else:
                    face.update_box_elipse(track_box)
                """ Prune features that are too far from the main cluster """
                if len(face.features) > 0:
                    # Consider to move face class
                    ((mean_x, mean_y, mean_z), mse_xy, mse_z,
                     score) = self.prune_features(
                         min_features=face.abs_min_features,
                         outlier_threshold=self.std_err_xy,
                         mse_threshold=self.max_mse,
                         face=face)
                    if score == -1:
                        face.lost_face()
                        continue
                """ Add features if the number is getting too low """
                if len(face.features) < face.min_features:
                    face.expand_roi = self.expand_roi_init * face.expand_roi
                    self.add_features(cv_image, face)
                else:
                    face.expand_roi = self.expand_roi_init
        self.detect_box.nextFrame(haar)
        self.detect_box.publish_faces()
        return cv_image
Example #17
 def __init__(self, dataset):
     self.detector = FaceDetector()
     self.recognizer = FaceRecognizer(dataset)
     self.dataset = dataset
Example #18
import os
import numpy as np
import cv2
from face_finder import FaceFinder, get_region_of_interest
from face_recognizer import FaceRecognizer

face_finder = FaceFinder()
recognizer = FaceRecognizer()
dataset_dir = os.path.join('dataset', 'train')

x_train = []
y_labels = []
label = 1


def recursive_append(path, x_train, y_labels, current_label):
    for file in os.listdir(path):
        filepath = os.path.join(path, file)
        if os.path.isdir(filepath):
            recursive_append(filepath, x_train, y_labels, current_label)
        elif file.endswith('png') or file.endswith('jpg'):
            image_array = cv2.imread(filepath)
            image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
            # image_array = cv2.equalizeHist(image_array)
            # normalized_image_array = np.zeros(image_array.shape)
            # normalized_image_array = cv2.normalize(image_array, normalized_image_array)
            # while True:
            #     cv2.imshow('image', image_array)
            #     if cv2.waitKey(20) & 0xFF == ord('q'):
            #         break
            faces = face_finder.find(image_array)
Example #19
def create_faces_image():
    images = cv2.imread(load_face_images())
    face_recognize = FaceRecognizer(images)
    face_recognize.face_recoginaze()
Example #20
    video_frame = input_movie.get(cv2.CAP_PROP_FRAME_COUNT)
    video_fps = input_movie.get(cv2.CAP_PROP_FPS)
    image_x = int(input_movie.get(cv2.CAP_PROP_FRAME_WIDTH))
    image_y = int(input_movie.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Create an output movie file
    fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V')
    output_file_name = os.path.splitext(args.video)[0] + '_result.mp4'
    output_movie = cv2.VideoWriter(output_file_name, fourcc, video_fps,
                                   (image_x, image_y))

    # progress
    progress = tqdm.tqdm(total=video_frame)
    progress.set_description("{:12}".format("Initialize"))

    face_recognizer = FaceRecognizer(args.face, args.tol)

    frame_number = 0

    progress.set_description("{:12}".format("Processing"))
    while True:
        # Grab a single frame of video
        ret, frame = input_movie.read()
        frame_number += 1

        # Quit when the input video file ends
        if not ret:
            break

        res_img = face_recognizer.recognize(frame)
Example #21
parser.add_argument(
    '--predictor_path',
    type=str,
    required=True,
    help=
    'location of the dlib facial landmark predictor where shape_predictor_68_face_landmarks.dat is located'
)
parser.add_argument('--gallery_path',
                    type=str,
                    required=True,
                    help='location of the gallery')
parser.add_argument('--port', type=int, default=8000, help='which port to use')
args = parser.parse_args()

face_aligner = FaceAligner(args.predictor_path)
face_recognizer = FaceRecognizer(args.gallery_path, OpenCVAlgorithm,
                                 face_aligner)
register_handler = RegisterHandler(args.gallery_path, face_aligner)
recognize_handler = RecognizeHandler(args.gallery_path, face_aligner,
                                     face_recognizer)


class S(BaseHTTPRequestHandler):
    def _set_response(self, message=None):
        self.send_response(200)
        if message is not None:
            #self.send_header('Content-type', 'text/html')
            self.send_header('Content-type', 'application/json')

        self.end_headers()

    def do_GET(self):
Example #22
 def __init__(self):
     self.face_finder = FaceFinder()
     self.recognizer = FaceRecognizer()
     self.recognizer.load('trainer.yml')
     self.security_trigger = Trigger(20, lambda similarity: similarity > 80)
Example #23
def train():
    opt = opts.parse_opt()
    opt.input_data = "MNIST"

    img_size = (opt.img_dim, opt.img_dim)
    print 'Dimension of images:', img_size
    train_data, train_label, id_gender = \
        get_30_people_chunk(opt.image_path, 1, gender_meta=True, img_size=img_size)
    test_data, test_label = get_30_people_chunk(opt.image_path,
                                                2,
                                                img_size=img_size)
    names = get_people_names(opt.image_path, 30)

    if opt.balance_data:
        ratio = opt.balance_ratio
        print 'Balancing dataset with ratio %f' % ratio
        train_data, train_label = balance_dataset(train_data, train_label)
        test_data, test_label = balance_dataset(test_data, test_label)

    if opt.balance_gender:
        print train_data.shape, train_label.shape
        print test_data.shape, test_label.shape
        print 'Balancing genders'
        selected_people = []
        for i in range(id_gender.shape[1]):
            indices, = np.where(id_gender[:, i] == 1)
            selected_people.append(np.random.choice(indices, 5, replace=False))
        selected_people = np.concatenate(selected_people)

        print 'Selected people are:'
        print np.array(names)[selected_people]

        selected_imgs = train_label[:, selected_people].sum(axis=1) != 0
        train_data = train_data[selected_imgs, :]
        train_label = train_label[selected_imgs, :]

        selected_imgs = test_label[:, selected_people].sum(axis=1) != 0
        test_data = test_data[selected_imgs, :]
        test_label = test_label[selected_imgs, :]

    print 'Shape of data:'
    print '\tTraining data: ' + str(train_data.shape)
    print '\tTraining label: ' + str(train_label.shape)
    print '\tMax, Min Train: %.4f, %.4f' % (np.max(train_data),
                                            np.min(train_data))
    print '\tTest data: ' + str(test_data.shape)
    print '\tTest label: ' + str(test_label.shape)
    print '\tMax, Min Test: %.4f, %.4f' % (np.max(test_data),
                                           np.min(test_data))

    x_dim = train_data.shape[1]
    y_dim = train_label.shape[1]

    opt.input_c_dim = 3
    opt.output_c_dim = 3
    opt.input_dim = x_dim
    opt.label_dim = y_dim
    input_shape = (x_dim, x_dim, opt.input_c_dim)

    batch_size = opt.batch_size
    print 'Batch size: %d' % batch_size

    NUM_REPR = 5
    NUM_SAMPLES_EACH = int(batch_size / NUM_REPR / 2)
    output_samples = get_output_samples(train_data, train_label, id_gender,
                                        NUM_REPR, NUM_SAMPLES_EACH)

    NUM_THREADS = 2
    tf_config = tf.ConfigProto()
    tf_config.intra_op_parallelism_threads = NUM_THREADS
    tf_config.gpu_options.allow_growth = True

    iteration_time = []
    with tf.Session(config=tf_config) as sess:

        id_model_path = '%s_%d_id_0' % (opt.lfw_base_path, x_dim)
        print '\tRetrieving evil model from "%s"' % id_model_path
        evil_model = FaceRecognizer(id_model_path, train_label.shape[1],
                                    input_shape, opt.input_c_dim)

        gender_model_path = '%s_%d_gender_0' % (opt.lfw_base_path, x_dim)
        print '\tRetrieving good model from "%s"' % gender_model_path
        good_model = FaceRecognizer(gender_model_path, 2, input_shape,
                                    opt.input_c_dim)
        model = advGAN(good_model, evil_model, opt, sess, mnist=False)

        iteration = 0
        if opt.resnet_gen:
            generator_mode = 'ResNet'
        else:
            generator_mode = 'Regular'
        summary_dir = "logs/LFW/g_%d_ld_%d_gl_%d_L2_%.2f_lr_%.4f_%s/" % (
            opt.G_lambda, opt.ld, opt.good_loss_coeff, opt.L2_lambda,
            opt.learning_rate, generator_mode)
        if os.path.isdir(summary_dir) is False:
            print 'Creating directory %s for logs.' % summary_dir
            os.mkdir(summary_dir)
        # else:
        #     print 'Removing all files in %s' % (summary_dir + '*')
        #     shutil.rmtree(summary_dir)

        writer = tf.summary.FileWriter(summary_dir, sess.graph)
        loader = Dataset2(train_data, train_label)
        print 'Training data loaded.'

        print 'Maximum iterations: %d' % opt.max_iteration
        max_acc_diff = -1.0
        while iteration < opt.max_iteration:
            # this function returns (data, label, np.array(target)).
            feed_data, evil_labels, real_data = loader.next_batch(
                batch_size, negative=False)
            good_labels = id_gender[np.argmax(evil_labels, axis=1)]

            feed = {
                model.source: feed_data,
                model.target: real_data,
                model.good_labels: good_labels,
                model.evil_labels: evil_labels
            }

            # Training G once.
            summary_str, G_loss, _ = sess.run(
                [model.total_loss_merge_sum, model.g_loss, model.G_train_op],
                feed)
            writer.add_summary(summary_str, iteration)

            # Training G twice.
            summary_str, G_loss, gan_loss, hinge_loss, l1_loss, l2_loss, \
                good_fn_loss, evil_fn_loss, adv_loss, total_loss, _ = sess.run([
                    model.total_loss_merge_sum,
                    model.g_loss,
                    model.gan_loss,
                    model.hinge_loss,
                    model.l1_loss,
                    model.l2_loss,
                    model.good_fn_loss,
                    model.evil_fn_loss,
                    model.adv_loss,
                    model.total_loss,
                    model.G_train_op], feed)
            writer.add_summary(summary_str, iteration)

            # Training D.
            summary_str, D_loss, _ = \
                sess.run([model.total_loss_merge_sum, model.d_loss, model.D_pre_train_op], feed)
            writer.add_summary(summary_str, iteration)

            if iteration % opt.losses_log_every == 0:
                print "iteration: ", iteration
                print '\tD: %.4f, G: %.4f\n\thinge(%.2f): %.4f, L1(%.2f): %.4f, L2(%.2f): %.4f' % (
                    D_loss, G_loss, opt.H_lambda, hinge_loss, opt.L1_lambda,
                    l1_loss, opt.L2_lambda, l2_loss)
                print '\t\tGAN total loss: %.4f' % gan_loss
                print '\tGood: %.4f, Evil: %.4f' % (good_fn_loss, evil_fn_loss)
                print '\tAdv: %.4f, Total: %.4f' % (adv_loss, total_loss)

                new_test_data = []
                new_pred_data = []
                head = 0
                last_batch = False
                while head < test_data.shape[0]:
                    if head + batch_size <= test_data.shape[0]:
                        tail = head + batch_size
                    else:
                        tail = test_data.shape[0]
                        head = test_data.shape[0] - batch_size
                        last_batch = True
                    cur_data, pred_data = sess.run(
                        [model.fake_images_output, model.prediction_ready],
                        {model.source: test_data[head:tail, :]})

                    if last_batch:
                        new_test_data.append(
                            cur_data[-(test_data.shape[0] % batch_size):, :])
                        new_pred_data.append(
                            pred_data[-(test_data.shape[0] % batch_size):, :])
                    else:
                        new_test_data.append(cur_data)
                        new_pred_data.append(pred_data)
                    head += batch_size
                new_test_data = np.concatenate(new_test_data)
                new_pred_data = np.concatenate(new_pred_data)

                good_pred = np.argmax(
                    model.good_model.model.predict(new_pred_data), axis=1)
                evil_pred = np.argmax(
                    model.evil_model.model.predict(new_pred_data), axis=1)
                evil_true = np.argmax(test_label, axis=1)
                good_true = np.argmax(id_gender[evil_true, :], axis=1)

                good_accuracy = accuracy_score(good_true, good_pred)
                evil_accuracy = accuracy_score(evil_true, evil_pred)
                total_good_confusion = confusion_matrix(good_true, good_pred)
                total_evil_confusion = confusion_matrix(
                    evil_true, evil_pred, labels=range(opt.evil_label_num))

                print '\tGood Accuracy: %.4f, Evil Accuracy: %.4f' % (
                    good_accuracy, evil_accuracy)
                print '\tAccuracy diff: %f' % (good_accuracy - evil_accuracy)
                print 'Good confusion matrix:'
                print total_good_confusion
                evil_misclass = total_evil_confusion.sum(
                    axis=0) - np.diag(total_evil_confusion)
                evil_idxs = np.argsort(-evil_misclass)
                print 'Top 3 Misclassifications:'
                print np.array(names)[evil_idxs][:3]
                print evil_misclass[evil_idxs][:3]
                evil_tp = np.diag(total_evil_confusion)
                evil_idxs = np.argsort(-evil_tp)
                print 'Top 3 True classifications:'
                print np.array(names)[evil_idxs][:3]
                print evil_tp[evil_idxs][:3]

                # print 'Selected people are:'
                # print names[evil_idxs].tolist()
                # print evil_tp
                # print total_evil_confusion
                # print evil_idxs

                fake_samples, fake_noise = sess.run(
                    [model.fake_images_output, model.fake_noise_output],
                    {model.source: output_samples})

                fakes = merge(fake_samples, [2 * NUM_REPR, NUM_SAMPLES_EACH])
                original = merge(output_samples,
                                 [2 * NUM_REPR, NUM_SAMPLES_EACH])
                noise = merge(fake_noise, [2 * NUM_REPR, NUM_SAMPLES_EACH])
                final_image = np.concatenate([fakes, noise, original], axis=1)

                scipy_imsave('snapshot_%d.png' % iteration, final_image)

                if (good_accuracy - evil_accuracy) > max(0.5, max_acc_diff):
                    print '\tSaving new training data at accuracy diff: %.4f' % (
                        good_accuracy - evil_accuracy),
                    max_acc_diff = good_accuracy - evil_accuracy

                    # other_good = FaceRecognizer('%s_%d_gender_0' % (opt.lfw_base_path, x_dim),
                    #                             2, input_shape, opt.input_c_dim)

                    # other_pred = np.argmax(other_good.model.predict(new_pred_data), axis=1)
                    # print 'Other Good accuracy: %.4f' % accuracy_score(good_true, other_pred)

                    # other_pred = np.argmax(other_good.model.predict(
                    #     preprocess_images(new_test_data * 255.0)), axis=1)
                    # print '\tTest data processeced accuracy: %.4f' % \
                    #     accuracy_score(good_true, other_pred)

                    # other_evil = FaceRecognizer('%s_%d_id_0' % (opt.lfw_base_path, x_dim),
                    #                             34, input_shape, opt.input_c_dim)
                    # other_pred = np.argmax(other_evil.model.predict(new_pred_data), axis=1)
                    # print 'Other Evil accuracy: %.4f' % accuracy_score(evil_true, other_pred)
                    # other_pred = np.argmax(other_evil.model.predict(
                    #     preprocess_images(new_test_data * 255.0)), axis=1)
                    # print '\tTest data processeced accuracy: %.4f' % \
                    #     accuracy_score(evil_true, other_pred)

                    new_train_data = []
                    head = 0
                    last_batch = False
                    while head < train_data.shape[0]:
                        if head + batch_size <= train_data.shape[0]:
                            tail = head + batch_size
                        else:
                            tail = train_data.shape[0]
                            head = train_data.shape[0] - batch_size
                            last_batch = True
                        cur_data = sess.run(
                            model.fake_images_output,
                            {model.source: train_data[head:tail, :]})

                        if last_batch:
                            new_train_data.append(
                                cur_data[-(train_data.shape[0] %
                                           batch_size):, :])
                        else:
                            new_train_data.append(cur_data)
                        head += batch_size
                    new_train_data = np.concatenate(new_train_data)

                    np.savez_compressed(opt.output_path,
                                        train_data=new_train_data,
                                        org_train_data=train_data,
                                        train_label=train_label,
                                        test_data=new_test_data,
                                        org_test_data=test_data,
                                        test_label=test_label,
                                        id_gender=id_gender)
                    print '\t[DONE]'

            iteration += 1
Example #24
def main(path, correct_label):
    qr_flag = False
    fr = FaceRecognizer()
    video = cv2.VideoCapture(path)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('data/output.mp4', fourcc, 20.0, (width, height))

    face_correct_cnt = 0
    face_wrong_cnt = 0
    face_undetected_cnt = 0
    multiple_face_cnt = 0
    cfd_high = 0
    cnt = 0

    while video.isOpened():
        cnt += 1
        ret, frame = video.read()
        if not ret:
            break

        # QR code detection
        if not qr_flag:
            info = qr_decode(Image.fromarray(np.uint8(frame)))
            if info:
                print('QR code detected')
                print(info)
                qr_flag = True

        # face recognition
        faces = crop_image(frame)
        if len(faces) == 1:
            x, y, w, h = faces[0]
            image = cv2.resize(frame[y: y + h, x: x + w], (200, 200),
                               interpolation=cv2.INTER_LINEAR)
            image_pil = Image.fromarray(np.uint8(image)).convert('L')
            image = np.array(image_pil, 'uint8')
            label, cfd = fr.recognizer.predict(image)
            if label != correct_label:
                face_wrong_cnt += 1
            else:
                face_correct_cnt += 1
            if cfd > 40:
                cfd_high += 1

            # write rectangle to indicate where the face is
            color = (0, 255, 0) if label == correct_label else (0, 0, 255)
            cv2.rectangle(frame, (x, y), (x+w, y+h), color, 3)
            font = cv2.FONT_HERSHEY_SIMPLEX
            txt = 'Ayumu' if label == correct_label else 'not Ayumu'
            cv2.putText(frame, txt + '%.3f' % (cfd), (x, y), font, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)

        elif len(faces) > 1:
            # print('multiple faces are detected!')
            multiple_face_cnt += 1
            # break
        elif len(faces) == 0:
            face_undetected_cnt += 1
        if face_wrong_cnt > threshold:
            print('face recognition failed')
            print(cnt)
            break

        out.write(frame)

    video.release()
    out.release()
    print('total frames', cnt)
    print('correct', face_correct_cnt)
    print('wrong', face_wrong_cnt)
    print('undetected', face_undetected_cnt)
    print('multiple', multiple_face_cnt)
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        default="models/lfw",
        help="Path to save trained model. e.g.: 'models/lfw'")
    parser.add_argument(
        '--image_path',
        type=str,
        default='./lfw_data/perturbed.npz',
        help='Path to LFW data.')
    parser.add_argument(
        '--image_size', type=int, default=224, help='Size of input images.')
    parser.add_argument(
        '--num_channels',
        type=int,
        default=3,
        help='Number of channels in input images.')
    parser.add_argument(
        '--train_new',
        dest='train_new',
        action='store_true',
        help='Train a new classifier.')
    parser.set_defaults(train_new=False)
    args = parser.parse_args()
    input_shape = (args.image_size, args.image_size, args.num_channels)

    data = np.load(args.image_path)
    id_gender = data['id_gender']

    org_train_data = data['org_train_data']
    train_data = data['train_data']
    train_id = np.argmax(data['train_label'], axis=1)
    train_gender = np.argmax(id_gender[train_id, :], axis=1)
    test_data = data['test_data']
    org_test_data = data['org_test_data']
    test_id = np.argmax(data['test_label'], axis=1)
    test_gender = np.argmax(id_gender[test_id, :], axis=1)
    num_good_labels = 2
    num_evil_labels = data['train_label'].shape[1]

    print train_data.shape
    print train_data.min(), train_data.max()
    print org_train_data.shape
    print org_train_data.min(), org_train_data.max()
    print test_data.shape
    print test_data.min(), test_data.max()
    print org_test_data.shape
    print org_test_data.min(), org_test_data.max()

    print 'Preprocessing data:',
    # Rebind the arrays explicitly: assigning to the loop variable
    # would leave the originals untouched.
    org_train_data, train_data, org_test_data, test_data = [
        preprocess_images(d * 255.0, version=1)
        for d in (org_train_data, train_data, org_test_data, test_data)]
    print '[DONE]'

    good_used = FaceRecognizer('%s_%d_gender_0' % (args.model_path,
                                                   args.image_size),
                               num_good_labels, input_shape)

    good_left = FaceRecognizer('%s_%d_gender_1' % (args.model_path,
                                                   args.image_size),
                               num_good_labels, input_shape)

    evil_used = FaceRecognizer('%s_%d_id_0' % (args.model_path,
                                               args.image_size),
                               num_evil_labels, input_shape)

    evil_left = FaceRecognizer('%s_%d_id_1' % (args.model_path,
                                               args.image_size),
                               num_evil_labels, input_shape)

    for model, label, name in zip(
        [evil_used, good_used, evil_left, good_left],
        [test_id, test_gender, test_id, test_gender],
        ['Used Evil', 'Used Good', 'Left-out Evil', 'Left-out Good']):
        print name + ':'
        org_pred = np.argmax(model.model.predict(org_test_data), axis=1)
        org_acc = accuracy_score(label, org_pred)
        print '\tOriginal Accuracy: %.4f' % org_acc
        dst_pred = np.argmax(model.model.predict(test_data), axis=1)
        dst_acc = accuracy_score(label, dst_pred)
        print '\tPerturbed Accuracy: %.4f' % dst_acc

    if args.train_new:
        # Train a new classifier with the new training data, test with original test data.
        raise NotImplementedError(
            'Training new classifier is not yet implemented.')
Example #26
import cv2
import numpy as np
from fastapi import FastAPI, File

from face_recognizer import FaceRecognizer
face_rec = FaceRecognizer()
face_rec.create_known_faces('data/mask_nomask')

app = FastAPI()


@app.get("/")
def root():
    return {"message": "Hello World"}


@app.post("/update")
def update():
    face_rec.create_known_faces('data/mask_nomask')
    return {"result": 'ok'}


@app.post("/file")
def file(file: bytes = File(...)):
    image_array = np.frombuffer(file, dtype=np.uint8)  # numpy array
    img = cv2.imdecode(image_array, cv2.IMREAD_COLOR)  # imdecode expects an imread flag, not a cvtColor code
    result = face_rec.recognize(img)
    return {"result": result}
Example #27
 def __init__(self):
     self._load_dataset_index()
     self.face_recognizer = FaceRecognizer(test_model_weight_path)
Example #28
from face_recognizer import FaceRecognizer

fce = FaceRecognizer()
# name of the person to be deleted
fce.delete_a_face('mals')