Example #1
    def __init__(self, parent=None):
        config = configparser.ConfigParser()
        config.read("config.ini")
        device = "MYRIAD"

        self.config = config
        self.face_database, self.face_collection = self.loadFaceData()
        self.log_database, self.log_collection = self.loadLogData()
        plugin = IEPlugin(device, plugin_dirs=None)
        self.face_embed = FaceEmbedding(plugin)
        self.face_detect = MobileFaceDetect(plugin)
        self.head_pose = HeadPoseEst(plugin)
        self.emotion_detect = EmotionDetect(plugin)
        self.ag_estimate = AgeGenderEstimate(plugin)
        self.ch = int(config["CAMERA"]['Height'])
        self.cw = int(config["CAMERA"]['Width'])
        self.door_host = config["DOOR_CONTROLLER"]['Host']
        self.door_port = int(config["DOOR_CONTROLLER"]['Port'])
        self.door_name = config["DOOR_CONTROLLER"]['Name']
        self.door_url = "http://{}:{}{}".format(self.door_host, self.door_name,
                                                self.door_port)
        self.door_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.door_socket_timeout = float(config["DOOR_CONTROLLER"]['Timeout'])
        # try:
        #     self.door_socket.connect((self.door_host, self.door_port))
        # except:
        #     print("can't connect to door socket")
        self.open_door_signal = config["DOOR_CONTROLLER"]['Open_door_signal']
        self.open_door_signal = self.open_door_signal.encode()

        self.face_lap_min_score = float(
            config["MOBILE_FACE_DET"]['Laplacian_min_score'])
        self.face_min_size = float(
            config["FACE_CONSOLIDATION"]['Face_min_ratio']) * (self.ch *
                                                               self.cw)
        self.face_margin = float(config["FACE_CONSOLIDATION"]['Face_margin'])
        self.fm_threshold = float(config["FACE_MATCH"]['Face_threshold'])
        self.debug = int(config["FACE_MATCH"]['Debug'])
        self.no_face_frame_limit = int(config["DELAY"]['No_face_frame'])
        self.recognition_delay = float(config["DELAY"]['Recognition_delay'])
        self.recog_suc_delay = float(config["DELAY"]['Recognize_success'])
        self.recog_fai_delay = float(config["DELAY"]['Recognize_failed'])
        self.angle_min = ast.literal_eval(config["HEAD_POSE"]["Angle_min"])
        self.angle_max = ast.literal_eval(config["HEAD_POSE"]["Angle_max"])
        self.log_img_dir = config["IMAGE_DIR"]['Log']
        self.window_mult = float(config["DISPLAY"]['Window_mult'])
        self.max_signal_send = int(
            config["DOOR_CONTROLLER"]['Max_signal_send'])
        self.use_emo = int(config["ADD_MODULE"]['Emotion'])
        self.use_ag = int(config["ADD_MODULE"]['Age_gender'])
        self.display_draw = DisplayDraw()
        self.test_face = False
        self.face_img = cv2.imread("test_img/test_face.jpg")
        self.face_img = cv2.resize(self.face_img, (120, 180))
        if not os.path.exists(self.log_img_dir):
            os.mkdir(self.log_img_dir)
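Example #1 reads nearly every tunable from config.ini. For orientation, a minimal sketch of the sections and keys it touches follows; every value is an illustrative placeholder, not a setting from the actual project.

[CAMERA]
Height = 1080
Width = 1920

[DOOR_CONTROLLER]
Host = 192.168.0.50
Port = 9000
Name = /door
Timeout = 5.0
Open_door_signal = OPEN
Max_signal_send = 3

[MOBILE_FACE_DET]
Laplacian_min_score = 100.0

[FACE_CONSOLIDATION]
Face_min_ratio = 0.01
Face_margin = 10

[FACE_MATCH]
Face_threshold = 0.6
Debug = 0

[DELAY]
No_face_frame = 30
Recognition_delay = 1.0
Recognize_success = 3.0
Recognize_failed = 1.5

[HEAD_POSE]
Angle_min = (-20.0, -20.0, -20.0)
Angle_max = (20.0, 20.0, 20.0)

[IMAGE_DIR]
Log = log_img

[DISPLAY]
Window_mult = 1.0

[ADD_MODULE]
Emotion = 0
Age_gender = 0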
Example #2
def main():
    cam = cv2.VideoCapture(0)
    # load data
    embed_file_path = "data/embed.pkl"
    face_database = loadData(embed_file_path)
    # load models
    device = "MYRIAD"
    plugin = IEPlugin(device, plugin_dirs=None)

    face_embed = FaceEmbedding(plugin)
    face_detect = MobileFaceDetect(plugin)
    # params
    config = configparser.ConfigParser()
    config.read("config.ini")
    fm_threshold = float(config["FACE_MATCH"]['Threshold'])
    label = "new"

    while True:
        ret, frame = cam.read()
        if not ret:
            print("dead cam")
            cam = cv2.VideoCapture(0)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue
        face_bboxes = face_detect.inference(frame)
        if len(face_bboxes) > 0:
            areas = [area(box) for box in face_bboxes]
            max_id = np.argmax(np.asarray(areas))
            mfb = face_bboxes[max_id]
            main_face = frame[mfb[1]:mfb[3], mfb[0]:mfb[2], :]
            # TODO real face detection
            # TODO face alignment
            # face_feature = face_embed(main_face, fe_net, fe_input_blob)
            # s = time.time()
            face_feature = face_embed.inference(main_face)
            # print(time.time() - s)
            # TODO face record
            best_match = bruteforce(face_feature, face_database, fm_threshold)
            if best_match is None:
                label = "new"
            else:
                label = str(best_match['id'])
            # visualize for debug
            cv2.rectangle(frame, (mfb[0], mfb[1]), (mfb[2], mfb[3]),
                          (255, 0, 0), 2)
            cv2.putText(frame,
                        label, (mfb[0], mfb[1]),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 0, 255),
                        lineType=cv2.LINE_AA)
        cv2.imshow("gandalf", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
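Example #2 relies on three helpers that are not shown in these listings: loadData, area, and bruteforce. Here is a plausible sketch, assuming the pickle file holds a list of {'id': ..., 'feats': [...]} records and that matching uses the same cosine distance the later examples use; none of this is the project's confirmed implementation.

import pickle

import numpy as np


def loadData(embed_file_path):
    # load the pickled face database (assumed: a list of dicts)
    with open(embed_file_path, "rb") as f:
        return pickle.load(f)


def area(box):
    # box is (x0, y0, x1, y1); returns its pixel area
    return max(0, box[2] - box[0]) * max(0, box[3] - box[1])


def cosineDistance(a, b):
    # 1 - cosine similarity; 0 means identical direction
    return 1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def bruteforce(face_feature, face_database, fm_threshold):
    # linear scan over every stored feature; return the record with the
    # smallest cosine distance under fm_threshold, or None if nothing matches
    best_match, best_dst = None, fm_threshold
    for person in face_database:
        for feat in person['feats']:
            dst = cosineDistance(face_feature, np.asarray(feat))
            if dst < best_dst:
                best_dst = dst
                best_match = person
    return best_match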
Example #3
 def __init__(self, parent=None):
     config = configparser.ConfigParser()
     config.read("config.ini")
     self.config = config
     self.face_db, self.face_col = self.loadFaceData()
     self.log_db, self.log_col = self.loadLogData()
     self.face_img_dir = config["IMAGE_DIR"]['Employee']
     self.log_img_dir = config["IMAGE_DIR"]['Log']
     self.face_lap_min_score = float(config["MOBILE_FACE_DET"]['Laplacian_min_score_update'])
     self.fm_threshold = float(config["FACE_MATCH"]['Face_update_threshold_max'])
     self.fm_identical_threshold = float(config["FACE_MATCH"]['Face_update_threshold_min'])
     self.max_img_per_acc = int(config["MONGO"]['Max_img_per_acc'])
     device = "MYRIAD"
     plugin = IEPlugin(device, plugin_dirs=None)
     self.face_embed = FaceEmbedding(plugin)
Example #4
 def __init__(self, parent=None):
     config = configparser.ConfigParser()
     config.read("config.ini")
     self.config = config
     self.face_detect = FaceDetection(config)
     self.face_embed = FaceEmbedding(config)
     # standby phase, delayed face detection
     self.standby_detection_delay = float(config["TASK_SCHEDULER"]['Standby_detection_delay'])
     self.standby_max_analysis = int(config["TASK_SCHEDULER"]['Standby_max_analysis'])
     self.clustering_upload_delay = float(config["TASK_SCHEDULER"]['Clustering_upload_delay'])
     self.max_img_per_person = int(config["UPLOAD"]['Max_img_per_person'])
     self.face_lap_min_var = float(config["FACE_CONSOLIDATION"]['Laplacian_min_variance'])
     self.face_margin_w_ratio = float(config["FACE_CONSOLIDATION"]['Face_margin_w_ratio'])
     self.face_margin_h_ratio = float(config["FACE_CONSOLIDATION"]['Face_margin_h_ratio'])
     self.xnet = Xnet(config)
     self.unprocess_face_queue = []
     self.face_queue = []
     self.feat_queue = []
     # self.standby_mode = True
     self.last_detected_face_time = time.time()
     self.last_clustering_time = time.time()
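Examples #4 and #6 store Face_margin_w_ratio / Face_margin_h_ratio, which process_queue in Examples #8 and #9 passes to a face_marginalize helper; Example #8 additionally calls resize_keepres. Neither helper is shown, so the sketch below is only one consistent reading: face_marginalize trims a relative margin off each side of a crop, and resize_keepres fits an image into a target size while preserving aspect ratio.

import cv2
import numpy as np


def face_marginalize(crop, w_ratio, h_ratio):
    # trim a relative margin off each side (assumed inverse of the
    # margin added when the face was cropped from the frame)
    h, w = crop.shape[:2]
    dw, dh = int(w * w_ratio), int(h * h_ratio)
    return crop[dh:h - dh, dw:w - dw, :]


def resize_keepres(img, target_h, target_w):
    # fit img inside (target_h, target_w) without distorting it,
    # padding the leftover area with black
    h, w = img.shape[:2]
    scale = min(target_h / h, target_w / w)
    resized = cv2.resize(img, (int(w * scale), int(h * scale)))
    canvas = np.zeros((target_h, target_w, 3), dtype=img.dtype)
    rh, rw = resized.shape[:2]
    canvas[:rh, :rw, :] = resized
    return canvas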
Example #5
def main():
    src_dir = "new_register"
    dst_dir = "data"
    face_collection = loadFaceData()
    device = "MYRIAD"
    plugin = IEPlugin(device, plugin_dirs=None)
    face_embed = FaceEmbedding(plugin)
    cos_dst = 0.5
    for subdir, dirs, files in os.walk(src_dir):
        for d in dirs:
            print(d)
            dir_path = os.path.join(src_dir, d)
            feats = []
            name = d
            for sd, ds, files in os.walk(dir_path):
                for f in files:
                    file_path = os.path.join(sd, f)
                    img = cv2.imread(file_path)
                    fe = face_embed.inference(img)
                    feats.append(fe.tolist())
            new_face = {'name': name, 'feats': feats}
            # insert to Mongo
            p_id = face_collection.insert_one(new_face).inserted_id
            # insert_one already persists the document; this update_one
            # re-sets the same fields and is effectively a no-op
            face_collection.update_one({'_id': p_id}, {"$set": new_face},
                                       upsert=False)
            # create account imgs dir
            new_id = str(p_id)
            img_dir = os.path.join(dst_dir, new_id)
            if not os.path.exists(img_dir):
                os.mkdir(img_dir)
            # paste images to account dir
            for sd, ds, files in os.walk(dir_path):
                for f in files:
                    src_file = os.path.join(sd, f)
                    dst_file = os.path.join(img_dir, f)
                    print(dst_file)
                    shutil.copyfile(src_file, dst_file)
            print("done!")
Example #6
 def __init__(self, parent=None):
     config = configparser.ConfigParser()
     config.read("config.ini")
     self.config = config
     self.face_detect = FaceDetection(config)
     self.face_embed = FaceEmbedding(config)
     # standby phase, delayed face detection
     self.standby_detection_delay = float(
         config["TASK_SCHEDULER"]['Standby_detection_delay'])
     self.standby_max_analysis = int(
         config["TASK_SCHEDULER"]['Standby_max_analysis'])
     self.clustering_upload_delay = float(
         config["TASK_SCHEDULER"]['Clustering_upload_delay'])
     self.max_img_per_person = int(config["UPLOAD"]['Max_img_per_person'])
     self.face_lap_min_var = float(
         config["FACE_CONSOLIDATION"]['Laplacian_min_variance'])
     self.face_margin_w_ratio = float(
         config["FACE_CONSOLIDATION"]['Face_margin_w_ratio'])
     self.face_margin_h_ratio = float(
         config["FACE_CONSOLIDATION"]['Face_margin_h_ratio'])
     self.task_await = float(config["TASK_SCHEDULER"]['Task_await'])
     self.xnet_timeout = float(self.config["XNET"]['Timeout'])
     self.cam_width = int(config["CAMERA"]['Width'])
     self.cam_height = int(config["CAMERA"]['Height'])
     self.ROI = ast.literal_eval(config["CAMERA"]["ROI"])
     self.img_upload_width = int(config["UPLOAD"]['Img_width'])
     self.img_upload_height = int(config["UPLOAD"]['Img_height'])
     self.cam = cv2.VideoCapture(0)
     self.set_cam_params()
     self.xnet = Xnet(config)
     self.unprocess_face_queue = []
     self.face_queue = []
     self.feat_queue = []
     self.upload_ufq = []
     # self.standby_mode = True
     self.last_detected_face_time = time.time()
     self.last_clustering_time = time.time()
Example #7
def main():
    cam = cv2.VideoCapture(0)
    # load data
    config = configparser.ConfigParser()
    config.read("config.ini")
    face_database, face_collection = loadMongoData(config)
    # load models
    device = "MYRIAD"
    data_dir = "data"
    plugin = IEPlugin(device, plugin_dirs=None)
    face_embed = FaceEmbedding(plugin)
    face_detect = MobileFaceDetect(plugin)
    # params
    fd_conf = 0.5
    fm_threshold = 0.6
    label = "new"
    period = 1
    button_pressed = False
    max_num = 3
    num = 0
    face_features = []
    face_imgs = []

    s = time.time()
    while True:
        ret, frame = cam.read()
        if not ret:
            print("dead cam")
            cam = cv2.VideoCapture(0)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue
        face_bboxes = []
        # if time.time() - s > period:
        # read the key once per frame so one waitKey call does not swallow
        # presses meant for the other checks below
        key = cv2.waitKey(1) & 0xFF
        if key == ord('c'):
            button_pressed = True
        if button_pressed and (num < max_num):
            face_bboxes = face_detect.inference(frame)
        if (len(face_bboxes) > 0) and button_pressed:
            areas = [area(box) for box in face_bboxes]
            max_id = np.argmax(np.asarray(areas))
            mfb = face_bboxes[max_id]
            main_face = frame[mfb[1]:mfb[3], mfb[0]:mfb[2], :]
            # TODO real face detection
            # TODO face alignment
            face_feature = face_embed.inference(main_face)
            face_feature = face_feature.tolist()
            face_features.append(face_feature)
            face_imgs.append(main_face)
            num += 1
            button_pressed = False
            s = time.time()
            # visualize for debug
            cv2.rectangle(frame, (mfb[0], mfb[1]), (mfb[2], mfb[3]),
                          (255, 0, 0), 2)
            cv2.putText(frame,
                        str(num), (mfb[0], mfb[1]),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 0, 255),
                        lineType=cv2.LINE_AA)
            print(num)

        if num >= max_num:
            # add new face features to database
            # face_database is the materialized list returned by
            # loadMongoData, so use len(); list.count() needs an argument
            new_id = len(face_database)
            new_face = {'name': str(new_id), 'feats': face_features}
            p_id = face_collection.insert_one(new_face).inserted_id
            # insert_one already persists the document; this update_one
            # re-sets the same fields and is effectively a no-op
            face_collection.update_one({'_id': p_id}, {"$set": new_face},
                                       upsert=False)

            # save images
            img_dir = os.path.join(data_dir, str(new_id))
            os.mkdir(img_dir)
            for i, face in enumerate(face_imgs):
                img_path = os.path.join(img_dir, "{}.jpg".format(i))
                cv2.imwrite(img_path, face)
            face_imgs = []
            face_features = []
            num = 0
            s = time.time()
            print("done!")

        cv2.imshow("face registration", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
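Example #7 calls loadMongoData(config), which is not defined in these listings. Judging from the loadFaceData methods of Examples #10 and #11, it almost certainly looks like the following sketch:

from pymongo import MongoClient


def loadMongoData(config):
    # same shape as FaceRecognition.loadFaceData in Example #10:
    # returns the materialized face list plus the live collection handle
    url = config["MONGO"]['Url']
    port = int(config["MONGO"]['Port'])
    db_name = config["MONGO"]['Database']
    col_name = config["MONGO"]['FaceCollection']
    client = MongoClient(url, port)
    db = client[db_name]
    collection = db[col_name]
    people = list(collection.find())
    return people, collection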
Example #8
class FaceQueueClustering(object):
    def __init__(self, parent=None):
        config = configparser.ConfigParser()
        config.read("config.ini")
        self.config = config
        self.face_detect = FaceDetection(config)
        self.face_embed = FaceEmbedding(config)
        # standby phase, delayed face detection
        self.standby_detection_delay = float(
            config["TASK_SCHEDULER"]['Standby_detection_delay'])
        self.standby_max_analysis = int(
            config["TASK_SCHEDULER"]['Standby_max_analysis'])
        self.clustering_upload_delay = float(
            config["TASK_SCHEDULER"]['Clustering_upload_delay'])
        self.max_img_per_person = int(config["UPLOAD"]['Max_img_per_person'])
        self.face_lap_min_var = float(
            config["FACE_CONSOLIDATION"]['Laplacian_min_variance'])
        self.face_margin_w_ratio = float(
            config["FACE_CONSOLIDATION"]['Face_margin_w_ratio'])
        self.face_margin_h_ratio = float(
            config["FACE_CONSOLIDATION"]['Face_margin_h_ratio'])
        self.task_await = float(config["TASK_SCHEDULER"]['Task_await'])
        self.xnet_timeout = float(self.config["XNET"]['Timeout'])
        self.cam_width = int(config["CAMERA"]['Width'])
        self.cam_height = int(config["CAMERA"]['Height'])
        self.ROI = ast.literal_eval(config["CAMERA"]["ROI"])
        self.img_upload_width = int(config["UPLOAD"]['Img_width'])
        self.img_upload_height = int(config["UPLOAD"]['Img_height'])
        self.cam = cv2.VideoCapture(0)
        self.set_cam_params()
        self.xnet = Xnet(config)
        self.unprocess_face_queue = []
        self.face_queue = []
        self.feat_queue = []
        self.upload_ufq = []
        # self.standby_mode = True
        self.last_detected_face_time = time.time()
        self.last_clustering_time = time.time()
        # self.num_cams = int(config["CAMERA"]['Num_cams'])
        # self.cam_ids = ast.literal_eval(config["CAMERA"]['Id'])

    def read_frame(self):
        s = time.time()
        ret, frame = self.cam.read()

        print("read cam time", time.time() - s)
        # print(frame.shape)
        if not ret:
            # dead cam
            self.cam = cv2.VideoCapture(0)
            self.set_cam_params()
            time.sleep(3.000)  # some delay to init cam
            # cam = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
            return None
        r = self.ROI
        frame = frame[r[0]:r[2], r[1]:r[3], :]
        # cv2.imwrite("frame.jpg", frame)
        return frame

    def serve(self):
        while True:
            if (time.time() -
                    self.last_clustering_time) > self.clustering_upload_delay:
                # asyncronous clustering/upload and sensing
                asyncio.run(self.task_scheduler())
            else:
                asyncio.run(self.sense_process(self.clustering_upload_delay))
        return 0

    async def task_scheduler(self):
        self.cluster_processed_feature()
        await asyncio.gather(
            self.upload(),
            self.sense_process(self.xnet_timeout),
        )
        print("finish upload/sensing block")
        return 0

    async def sense_process(self, duration):
        await asyncio.sleep(self.task_await)
        init_time = time.time()
        while self.cam.isOpened():
            print("unprocess: ", len(self.unprocess_face_queue), "processed: ",
                  len(self.face_queue))
            # get RAM info, TODO: save to disk when OOM
            # print("RAM", psutil.virtual_memory()[2])
            if (time.time() - init_time) > duration:
                print("finish sensing block")
                return 0
            if (time.time() - self.last_detected_face_time
                ) > self.standby_detection_delay:
                print("last face detected",
                      time.time() - self.last_detected_face_time)
                frame = self.read_frame()
                if frame is None:
                    continue
                st = time.time()
                self.standby_serve(frame)
                print("standby", time.time() - st)
            else:
                frame = self.read_frame()
                if frame is None:
                    continue
                st = time.time()
                self.active_serve(frame)
                print("active", time.time() - st)

    def set_cam_params(self):
        self.cam.set(3, self.cam_width)
        self.cam.set(4, self.cam_height)
        self.cam.set(cv2.CAP_PROP_BUFFERSIZE, 3)

    def standby_serve(self, frame):
        """
        1 face detection + n face analysis from queue
        """
        print("standby process detect")
        self.process_queue()
        self.detect_queue(frame, mode='standby')
        return 0

    def active_serve(self, frame, mode='active'):
        """
        1 face detection, save face to memory
        """
        self.detect_queue(frame)
        return 0

    def detect_queue(self, frame, mode='active'):
        face_bboxes = self.face_detect.inference(frame)
        print("faces detected: ", len(face_bboxes))
        if len(face_bboxes) == 0:
            return 1
        self.last_detected_face_time = time.time()
        # if mode == 'standby':
        #     return 0
        # print(face_bboxes)
        for b in face_bboxes:
            if (b[3] > b[1]) and (b[2] > b[0]):
                # detect blurr, headpose est
                # crop = frame[b[1]:b[3], b[0]:b[2], :]
                # crop = face_marginalize(crop, self.face_margin_w_ratio, self.face_margin_h_ratio)
                crop = face_crop(frame, b, 0, self.face_margin_h_ratio)
                blur_face = cv2.resize(crop, (112, 112))
                blur_face_var = cv2.Laplacian(blur_face, cv2.CV_64F).var()
                if blur_face_var > self.face_lap_min_var:
                    self.unprocess_face_queue.append({
                        # 'crop': cv2.resize(
                        #     crop,
                        #     (
                        #         self.img_upload_height,
                        #         self.img_upload_width
                        #     )
                        # ),
                        'crop':
                        resize_keepres(crop, self.img_upload_height,
                                       self.img_upload_width),
                        'time':
                        self.last_detected_face_time
                    })
                    # cv2.imwrite("face.jpg", crop)
        return 0

    def process_queue(self):
        """
        face embed, age, gender recognition
        TODO: memory management
        """
        for i in range(self.standby_max_analysis):
            if len(self.unprocess_face_queue) == 0:
                break
            face = self.unprocess_face_queue.pop(0)
            # print(face)
            input_face = face_marginalize(face['crop'],
                                          self.face_margin_w_ratio,
                                          self.face_margin_h_ratio)
            face_feature = self.face_embed.inference(input_face)
            self.face_queue.append({
                'crop': face['crop'],
                'time': face['time'],
                # 'feat': face_feature
            })
            self.feat_queue.append(face_feature)
        return 0

    def cluster_processed_feature(self):
        """
        cluster face queue into different people
        send several imgs and info per person to server
        # TODO: separate into cluster and upload functions
        """
        self.unique_faces = []
        # clustering
        if len(self.feat_queue) > 0:
            # print("no human")
            # self.last_clustering_time = time.time()
            # return 1
            print("cluster size", len(self.feat_queue), len(self.face_queue))
            labels = face_clustering(self.feat_queue)
            class_ids = np.unique(labels)
            print("unique ", class_ids)
            print(labels)
            for cli in class_ids:
                # noise
                if cli == -1:
                    continue
                if len(labels) > 1:
                    # np.where(...)[0] keeps a 1-D index array even for
                    # singleton clusters (np.squeeze would make it 0-D and
                    # break .shape[0] below)
                    cli_feat_ids = np.where(labels == cli)[0]
                    sample_size = cli_feat_ids.shape[0]
                    # print(sample_size, self.max_img_per_person)
                    # TODO handle sample_size < max_img_per_person
                    num_upload_imgs = min(self.max_img_per_person, sample_size)
                    chosen_ids = np.unique(
                        np.random.choice(sample_size,
                                         num_upload_imgs,
                                         replace=False))
                else:
                    cli_feat_ids = np.asarray([0])
                    chosen_ids = np.asarray([0])
                self.unique_faces.append({
                    'faces': [
                        b64_encode(self.face_queue[cli_feat_ids[i]]['crop'])
                        for i in chosen_ids
                    ],
                    'time': [self.face_queue[i]['time'] for i in cli_feat_ids],
                })
            print("num of unique people: ", len(self.unique_faces))

        self.last_clustering_time = time.time()
        # cleanup garbage
        self.feat_queue = []
        self.face_queue = []
        # convert b64 unprocess_face_queue
        self.upload_ufq = [{
            'face': b64_encode(ufq['crop']),
            'time': ufq['time'],
        } for ufq in self.unprocess_face_queue]
        self.unprocess_face_queue = []

    async def upload(self):
        # upload to server
        # st = time.time()
        await self.xnet.log_face(self.unique_faces, self.upload_ufq)
        # print("upload time: ", time.time() - st)
        self.upload_ufq = []
        return 0
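Examples #8 and #9 lean on face_clustering, b64_encode, and (in Example #8) face_crop, none of which are shown. Since the calling code treats label -1 as noise, the clustering is almost certainly DBSCAN-style; the sketch below assumes scikit-learn's DBSCAN over cosine distance, with eps and min_samples as placeholder values.

import base64

import cv2
import numpy as np
from sklearn.cluster import DBSCAN


def face_clustering(feat_queue, eps=0.5, min_samples=2):
    # one label per feature; -1 marks noise, matching the cli == -1
    # checks in cluster_processed_feature / cluster_upload
    feats = np.asarray(feat_queue)
    return DBSCAN(eps=eps, min_samples=min_samples,
                  metric='cosine').fit_predict(feats)


def b64_encode(img):
    # JPEG-compress a BGR image and return it as a base64 string
    ok, buf = cv2.imencode(".jpg", img)
    return base64.b64encode(buf).decode('utf-8') if ok else None


def face_crop(frame, b, w_margin_ratio, h_margin_ratio):
    # crop bbox b = (x0, y0, x1, y1) from frame, padded by a margin
    # relative to the bbox size and clipped to the frame bounds
    fh, fw = frame.shape[:2]
    dx = int((b[2] - b[0]) * w_margin_ratio)
    dy = int((b[3] - b[1]) * h_margin_ratio)
    x0, y0 = max(0, b[0] - dx), max(0, b[1] - dy)
    x1, y1 = min(fw, b[2] + dx), min(fh, b[3] + dy)
    return frame[y0:y1, x0:x1, :]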
Example #9
class FaceQueueClustering(object):
    def __init__(self, parent=None):
        config = configparser.ConfigParser()
        config.read("config.ini")
        self.config = config
        self.face_detect = FaceDetection(config)
        self.face_embed = FaceEmbedding(config)
        # standby phase, delayed face detection
        self.standby_detection_delay = float(config["TASK_SCHEDULER"]['Standby_detection_delay'])
        self.standby_max_analysis = int(config["TASK_SCHEDULER"]['Standby_max_analysis'])
        self.clustering_upload_delay = float(config["TASK_SCHEDULER"]['Clustering_upload_delay'])
        self.max_img_per_person = int(config["UPLOAD"]['Max_img_per_person'])
        self.face_lap_min_var = float(config["FACE_CONSOLIDATION"]['Laplacian_min_variance'])
        self.face_margin_w_ratio = float(config["FACE_CONSOLIDATION"]['Face_margin_w_ratio'])
        self.face_margin_h_ratio = float(config["FACE_CONSOLIDATION"]['Face_margin_h_ratio'])
        self.xnet = Xnet(config)
        self.unprocess_face_queue = []
        self.face_queue = []
        self.feat_queue = []
        # self.standby_mode = True
        self.last_detected_face_time = time.time()
        self.last_clustering_time = time.time()
        # self.num_cams = int(config["CAMERA"]['Num_cams'])
        # self.cam_ids = ast.literal_eval(config["CAMERA"]['Id'])

    def serve(self):
        # TODO: multi-cam central processing
        # cams = [
        #     cv2.VideoCapture(i)
        #     for i in self.cam_ids
        # ]

        cam = cv2.VideoCapture(0)
        cam.set(3, 1920)
        cam.set(4, 1080)
        cam.set(cv2.CAP_PROP_BUFFERSIZE, 3)
        while True:
            ret, frame = cam.read()
            if not ret:
                # dead cam
                cam = cv2.VideoCapture(0)
                time.sleep(3.000)  # some delay to init cam
                # cam = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
                continue
            # frame is only valid after a successful read
            print(frame.shape)
            if (time.time() - self.last_detected_face_time) > self.standby_detection_delay:
                # TODO: clustering and upload mode
                st = time.time()
                self.standby_serve(frame)
                print("standby", time.time() - st)
            else:
                st = time.time()
                self.active_serve(frame)
                print("active", time.time() - st)
            if (time.time() - self.last_clustering_time) > self.clustering_upload_delay:
                st = time.time()
                self.cluster_upload()
                print("clustering", time.time() - st)

        return 0

    def standby_serve(self, frame):
        """
        1 face detection + n face analysis from queue
        """
        self.process_queue()
        self.detect_queue(frame)
        return 0

    def active_serve(self, frame):
        """
        1 face detection, save face to memory
        TODO: memory management, e.g. save to disk before OOM
        """
        self.detect_queue(frame)
        return 0

    def detect_queue(self, frame):
        face_bboxes = self.face_detect.inference(frame)
        if len(face_bboxes) == 0:
            return 1
        # TODO: blurry analysis, head angle estimate
        self.last_detected_face_time = time.time()
        # print(face_bboxes)
        for b in face_bboxes:
            if (b[3] > b[1]) and (b[2] > b[0]):
                crop = frame[b[1]:b[3], b[0]:b[2], :]
                # detect blurr, headpose est
                # TODO: head pose est, move to separate function/class
                blur_face = cv2.resize(crop, (112, 112))
                blur_face_var = cv2.Laplacian(blur_face, cv2.CV_64F).var()
                if blur_face_var > self.face_lap_min_var:
                    self.unprocess_face_queue.append({
                        'crop': crop,
                        'time': self.last_detected_face_time
                    })
        return 0

    def process_queue(self):
        """
        face embed, age, gender recognition
        TODO: memory management
        """
        for i in range(self.standby_max_analysis):
            if len(self.unprocess_face_queue) == 0:
                break
            face = self.unprocess_face_queue.pop(0)
            # print(face)
            input_face = face_marginalize(face['crop'], self.face_margin_w_ratio, self.face_margin_h_ratio)
            face_feature = self.face_embed.inference(input_face)
            self.face_queue.append(
                {
                    'crop': face['crop'],
                    'time': face['time'],
                    # 'feat': face_feature
                }
            )
            self.feat_queue.append(face_feature)
        return 0

    def cluster_upload(self):
        """
        cluster face queue into different people
        send several imgs and info per person to server
        # TODO: better time handling (timezone, cam location based, etc)
        """
        if len(self.feat_queue) == 0:
            print("no human")
            self.last_clustering_time = time.time()
            return 1
        print("cluster size", len(self.feat_queue), len(self.face_queue))
        labels = face_clustering(self.feat_queue)
        class_ids = np.unique(labels)
        unique_faces = []
        print("unique ", class_ids)
        print(labels)
        for cli in class_ids:
            # noise
            if cli == -1:
                continue
            if len(labels) > 1:
                # np.where(...)[0] keeps a 1-D index array even for singleton
                # clusters (np.squeeze would make it 0-D and break .shape[0])
                cli_feat_ids = np.where(labels == cli)[0]
                sample_size = cli_feat_ids.shape[0]
                # print(sample_size, self.max_img_per_person)
                # TODO handle sample_size < max_img_per_person
                num_upload_imgs = min(self.max_img_per_person, sample_size)
                chosen_ids = np.unique(
                    np.random.choice(
                        sample_size,
                        num_upload_imgs,
                        replace=False
                    )
                )
            else:
                cli_feat_ids = np.asarray([0])
                chosen_ids = np.asarray([0])
            unique_faces.append(
                {
                    'faces': [
                        b64_encode(self.face_queue[cli_feat_ids[i]]['crop'])
                        for i in chosen_ids
                    ],
                    'time': [
                        self.face_queue[i]['time']
                        for i in cli_feat_ids
                    ]
                }
            )
        print("num of unique people: ", len(unique_faces))

        self.xnet.log_face(unique_faces)
        # cleanup garbage
        self.feat_queue = []
        self.face_queue = []
        self.last_clustering_time = time.time()
        return 0
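Example #8 awaits xnet.log_face while Example #9 calls it synchronously, so the two files presumably carry different Xnet revisions. Below is a minimal synchronous sketch matching Example #9; the Host and Port keys and the /log_face route are invented for illustration, as only [XNET] Timeout appears anywhere in these listings.

import requests


class Xnet(object):
    def __init__(self, config):
        self.timeout = float(config["XNET"]['Timeout'])
        # the Host/Port keys and the route below are assumptions
        self.url = "http://{}:{}/log_face".format(config["XNET"]['Host'],
                                                  int(config["XNET"]['Port']))

    def log_face(self, unique_faces, upload_ufq=None):
        # push clustered faces (and optionally raw unprocessed ones)
        payload = {'faces': unique_faces, 'unprocessed': upload_ufq or []}
        r = requests.post(self.url, json=payload, timeout=self.timeout)
        return r.status_code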
Example #10
class FaceRecognition(object):
    def __init__(self, parent=None):
        config = configparser.ConfigParser()
        config.read("config.ini")
        device = "MYRIAD"

        self.config = config
        self.face_database, self.face_collection = self.loadFaceData()
        self.log_database, self.log_collection = self.loadLogData()
        plugin = IEPlugin(device, plugin_dirs=None)
        self.face_embed = FaceEmbedding(plugin)
        self.face_detect = MobileFaceDetect(plugin)
        self.head_pose = HeadPoseEst(plugin)
        self.ch = int(config["CAMERA"]['Height'])
        self.cw = int(config["CAMERA"]['Width'])
        self.door_host = config["DOOR_CONTROLLER"]['Host']
        self.door_port = int(config["DOOR_CONTROLLER"]['Port'])
        self.door_name = config["DOOR_CONTROLLER"]['Name']
        self.door_url = "http://{}:{}{}".format(self.door_host, self.door_name,
                                                self.door_port)
        self.door_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.door_socket.connect((self.door_host, self.door_port))
        self.open_door_signal = config["DOOR_CONTROLLER"]['Open_door_signal']
        self.open_door_signal = self.open_door_signal.encode()

        self.face_lap_min_score = float(
            config["MOBILE_FACE_DET"]['Laplacian_min_score'])
        self.face_min_size = float(
            config["FACE_CONSOLIDATION"]['Face_min_ratio']) * (self.ch *
                                                               self.cw)
        self.face_margin = int(config["FACE_CONSOLIDATION"]['Face_margin'])
        self.fm_threshold = float(config["FACE_MATCH"]['Face_threshold'])
        # self.face_counter = int(config["FACE_MATCH"]['Counter'])
        # self.ct_threshold = float(config["FACE_MATCH"]['Counter_threshold'])
        self.debug = int(config["FACE_MATCH"]['Debug'])
        self.no_face_frame_limit = int(config["DELAY"]['No_face_frame'])
        self.recognition_delay = float(config["DELAY"]['Recognition_delay'])
        self.recog_suc_delay = float(config["DELAY"]['Recognize_success'])
        self.recog_fai_delay = float(config["DELAY"]['Recognize_failed'])
        self.angle_min = ast.literal_eval(config["HEAD_POSE"]["Angle_min"])
        self.angle_max = ast.literal_eval(config["HEAD_POSE"]["Angle_max"])
        self.log_img_dir = config["IMAGE_DIR"]['Log']
        self.window_mult = float(config["DISPLAY"]['Window_mult'])
        self.max_signal_send = int(
            config["DOOR_CONTROLLER"]['Max_signal_send'])
        self.display_draw = DisplayDraw()
        self.test_face = False
        self.face_img = cv2.imread("test_img/test_face.jpg")
        self.face_img = cv2.resize(self.face_img, (120, 180))
        if not os.path.exists(self.log_img_dir):
            os.mkdir(self.log_img_dir)

    def serve(self):
        face_rec_delay = time.time()
        no_face_frame = 0
        cv2.namedWindow('gandalf', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('gandalf', int(self.cw * self.window_mult),
                         int(self.ch * self.window_mult))
        cv2.setWindowProperty('gandalf', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cam = cv2.VideoCapture(0)
        # cam = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
        # label = "Hello"
        while True:
            ret, frame = cam.read()
            # h, w, c = frame.shape
            # print(h, w, c)
            if not ret:
                # dead cam
                cam = cv2.VideoCapture(0)
                time.sleep(3.000)  # some delay to init cam
                # cam = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
                continue
                # cv2.destroyAllWindows()
                # return 1
            if self.test_face:
                frame[120:300, 280:400, :] = self.face_img
                # add test face on frame

            # go from standby to face detection phase
            # if no_face_frame > self.no_face_frame_limit:
            #     # no error
            #     cv2.destroyAllWindows()
            #     return 0
            # face detection phase
            face_bboxes = self.face_detect.inference(frame)
            if len(face_bboxes) > 0:
                no_face_frame = 0
                areas = [area(box) for box in face_bboxes]
                max_id = np.argmax(np.asarray(areas))
                mfb = face_bboxes[max_id]
                # print(area(mfb), self.face_min_size, frame.shape)
                # face consolidation phase, calculate face angle
                if area(mfb) > self.face_min_size:
                    x0 = max(0, mfb[0] - self.face_margin)
                    y0 = max(0, mfb[1] - self.face_margin)
                    # x coordinates clip against frame width, y against height
                    x1 = min(self.cw, mfb[2] + self.face_margin)
                    y1 = min(self.ch, mfb[3] + self.face_margin)
                    main_head = frame[y0:y1, x0:x1, :]

                    # detect blurry face
                    h, w, c = main_head.shape
                    # print("hp img shape: ", img.shape)
                    if (h > 0) and (w > 0):
                        blur_face = cv2.resize(main_head, (112, 112))
                        blur_face_var = cv2.Laplacian(blur_face,
                                                      cv2.CV_64F).var()
                        if blur_face_var < self.face_lap_min_score:
                            cv2.rectangle(frame, (mfb[0], mfb[1]),
                                          (mfb[2], mfb[3]), (255, 0, 0), 2)
                            face_rec_delay_amount = time.time() - face_rec_delay
                            if face_rec_delay_amount > self.recognition_delay:
                                frame = self.display_draw.drawLACText(frame)
                            else:
                                frame = self.display_draw.drawLastText(frame)
                                # label = "please look at the camera"
                            cv2.imshow("gandalf", frame)
                            if cv2.waitKey(1) & 0xFF == ord('q'):
                                cv2.destroyAllWindows()
                                return 2
                            continue

                    # detect head pose
                    yaw, pitch, roll = self.head_pose.inference(main_head)
                    if not good_head_angle(yaw, pitch, roll, self.angle_min,
                                           self.angle_max):
                        cv2.rectangle(frame, (mfb[0], mfb[1]),
                                      (mfb[2], mfb[3]), (255, 0, 0), 2)
                        face_rec_delay_amount = time.time() - face_rec_delay
                        if face_rec_delay_amount > self.recognition_delay:
                            frame = self.display_draw.drawLACText(frame)
                        else:
                            frame = self.display_draw.drawLastText(frame)
                        cv2.imshow("gandalf", frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            cv2.destroyAllWindows()
                            return 2
                        continue

                    # TODO: face liveness detection
                else:
                    # face too small
                    face_rec_delay_amount = time.time() - face_rec_delay
                    if face_rec_delay_amount > self.recognition_delay:
                        frame = self.display_draw.drawMCText(frame)
                    else:
                        frame = self.display_draw.drawLastText(frame)
                    cv2.imshow("gandalf", frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        cv2.destroyAllWindows()
                        return 2
                    continue

                # face recognition phase
                face_rec_delay_amount = time.time() - face_rec_delay
                if face_rec_delay_amount >= self.recognition_delay:
                    main_face = frame[mfb[1]:mfb[3], mfb[0]:mfb[2], :]
                    # TODO face alignment
                    face_feature = self.face_embed.inference(main_face)
                    best_match = bruteforce(face_feature, self.face_database,
                                            self.fm_threshold)
                    # TODO face record
                    if best_match is None:
                        new_log = {
                            'result': 'failed',
                            'time':
                            datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        }
                        self.updateLog(new_log, main_face)
                        self.display_draw.drawFailedText(frame)
                        self.recognition_delay = self.recog_fai_delay
                    else:
                        self.callDoorControllerSocket()
                        new_log = {
                            'result': 'success',
                            'face_id': best_match['_id'],
                            'time':
                            datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                        }
                        self.updateLog(new_log, main_face)
                        self.display_draw.drawSuccessText(
                            frame, str(best_match['name']))
                        self.recognition_delay = self.recog_suc_delay
                    face_rec_delay = time.time()
                cv2.rectangle(frame, (mfb[0], mfb[1]), (mfb[2], mfb[3]),
                              (255, 0, 0), 2)
            else:
                no_face_frame += 1
                face_rec_delay_amount = time.time() - face_rec_delay
                if face_rec_delay_amount > self.recognition_delay:
                    frame = self.display_draw.drawDefaultText(frame)
                else:
                    frame = self.display_draw.drawLastText(frame)
            cv2.imshow("gandalf", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                return 2
            elif cv2.waitKey(1) & 0xFF == ord('t'):
                self.test_face = True
            elif cv2.waitKey(1) & 0xFF == ord('y'):
                self.test_face = False

    def loadFaceData(self):
        """
        return cursor and collection
        """
        url = self.config["MONGO"]['Url']
        port = int(self.config["MONGO"]['Port'])
        db_name = self.config["MONGO"]['Database']
        col_name = self.config["MONGO"]['FaceCollection']
        client = MongoClient(url, port)
        db = client[db_name]
        collection = db[col_name]
        # get the whole collection
        people = list(collection.find())
        return people, collection

    def loadLogData(self):
        """
        return collection
        """
        url = self.config["MONGO"]['Url']
        port = int(self.config["MONGO"]['Port'])
        db_name = self.config["MONGO"]['Database']
        col_name = self.config["MONGO"]['LogCollection']
        client = MongoClient(url, port)
        db = client[db_name]
        collection = db[col_name]
        # get the whole collection
        logs = list(collection.find())
        return logs, collection

    def updateLog(self, new_log, face_img):
        p_id = self.log_collection.insert_one(new_log).inserted_id
        self.log_collection.update_one({'_id': p_id}, {"$set": new_log},
                                       upsert=False)
        new_img_name = "{}_{}.jpg".format(new_log['time'], new_log['result'])
        # remove special chars
        new_img_name = new_img_name.replace("-", "_")
        new_img_name = new_img_name.replace(":", "_")
        new_img_name = new_img_name.replace(" ", "_")
        new_img_path = os.path.join(self.log_img_dir, new_img_name)
        # print(new_img_path)
        cv2.imwrite(new_img_path, face_img)
        return p_id

    def callDoorControllerPost(self, signal):
        body = {
            'signal': signal,
            'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        }
        header = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "text/plain"
        }
        body_json = json.dumps(body)
        r = requests.post(self.door_url, data=body_json, headers=header)
        print(r.json())

    def callDoorControllerSocket(self):
        # check connection
        try:
            ready_to_read, ready_to_write, in_error = \
                select.select([self.door_socket,], [self.door_socket,], [], 5)
        except select.error as e:
            print(e)
            # shutdown connection
            self.door_socket.shutdown(
                2)  # 0 = done receiving, 1 = done sending, 2 = both
            self.door_socket.close()
            self.door_socket = socket.socket(socket.AF_INET,
                                             socket.SOCK_STREAM)
            # try to reconnect
            self.door_socket.connect((self.door_host, self.door_port))
        for i in range(self.max_signal_send):
            print("sending to door controller")
            self.door_socket.sendall(self.open_door_signal)
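Example #10 gates recognition on good_head_angle(yaw, pitch, roll, angle_min, angle_max), with both bounds parsed from config.ini via ast.literal_eval. A sketch assuming Angle_min and Angle_max are (yaw, pitch, roll) triples:

def good_head_angle(yaw, pitch, roll, angle_min, angle_max):
    # accept the face only if every head-pose angle lies inside its
    # configured [min, max] band (bounds assumed to be y/p/r triples)
    angles = (yaw, pitch, roll)
    return all(lo <= a <= hi
               for a, lo, hi in zip(angles, angle_min, angle_max))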
Example #11
class FaceUpdate(object):
    def __init__(self, parent=None):
        config = configparser.ConfigParser()
        config.read("config.ini")
        self.config = config
        self.face_db, self.face_col = self.loadFaceData()
        self.log_db, self.log_col = self.loadLogData()
        self.face_img_dir = config["IMAGE_DIR"]['Employee']
        self.log_img_dir = config["IMAGE_DIR"]['Log']
        self.face_lap_min_score = float(config["MOBILE_FACE_DET"]['Laplacian_min_score_update'])
        self.fm_threshold = float(config["FACE_MATCH"]['Face_update_threshold_max'])
        self.fm_identical_threshold = float(config["FACE_MATCH"]['Face_update_threshold_min'])
        self.max_img_per_acc = int(config["MONGO"]['Max_img_per_acc'])
        device = "MYRIAD"
        plugin = IEPlugin(device, plugin_dirs=None)
        self.face_embed = FaceEmbedding(plugin)

    def loadFaceData(self):
        """
        return cursor and collection
        """
        url = self.config["MONGO"]['Url']
        port = int(self.config["MONGO"]['Port'])
        db_name = self.config["MONGO"]['Database']
        col_name = self.config["MONGO"]['FaceCollection']
        client = MongoClient(url, port)
        db = client[db_name]
        collection = db[col_name]
        # get the whole collection
        people = list(collection.find())
        return people, collection

    def loadLogData(self):
        """
        return collection
        """
        url = self.config["MONGO"]['Url']
        port = int(self.config["MONGO"]['Port'])
        db_name = self.config["MONGO"]['Database']
        col_name = self.config["MONGO"]['LogCollection']
        client = MongoClient(url, port)
        db = client[db_name]
        collection = db[col_name]
        # get the whole collection
        logs = list(collection.find())
        return logs, collection

    def dailyUpdate(self, prev_days=1):
        """
        update all faces in defined previous days
        """
        epoch = datetime.now() - timedelta(days=prev_days)
        epoch_str = epoch.strftime('%Y-%m-%d %H:%M:%S')
        # success_logs = [log for log in self.log_db if log['result'] == 'success']
        success_logs = [
            log for log in self.log_db 
            if (log['result'] == 'success') and (log['time'] > epoch_str)
        ]
        for log in success_logs:
            # load face based on log
            log_id = str(log['face_id'])
            face_acc = self.face_col.find_one({"_id": ObjectId(log_id)})
            # get associated log image
            log_img_name = "{}_{}.jpg".format(log['time'], log['result'])
            # remove special chars
            log_img_name = log_img_name.replace("-", "_")
            log_img_name = log_img_name.replace(":", "_")
            log_img_name = log_img_name.replace(" ", "_")
            log_img_path = os.path.join(self.log_img_dir, log_img_name)
            log_img = cv2.imread(log_img_path)
            if log_img is None:
                print(log_img_path, " : log image missing, skipped")
                continue
            # calculate blurness
            blur_face = cv2.resize(log_img, (112, 112))
            blur_face_var = cv2.Laplacian(blur_face, cv2.CV_64F).var()
            if blur_face_var < self.face_lap_min_score:
                print(log_img_path, " : face too blurry")
                continue
            # calculate arcface score
            log_face_feat = self.face_embed.inference(log_img)
            # get smallest cosine distance
            min_dst = 100
            best_feat = None
            for feat in face_acc['feats']:
                feat_np = np.asarray(feat)
                cos_dst = cosineDistance(log_face_feat, feat_np)
                if (cos_dst < self.fm_threshold) and (cos_dst < min_dst):
                    min_dst = cos_dst
                    best_feat = feat_np

            if min_dst > self.fm_threshold:
                print(log_img_path, " : face too different", min_dst)
                continue
            elif min_dst < self.fm_identical_threshold:
                print(log_img_path, " : face too similar", min_dst)
                continue
            
            print(log_img_path, " : added!")
            # update database, if number of imgs per acc exceed max allowance,
            # delete first recorded feat, keep image
            face_acc['feats'].append(best_feat.tolist())
            if len(face_acc['feats']) > self.max_img_per_acc:
                old_feat = face_acc['feats'].pop(0)
            self.face_col.update_one({'_id': face_acc['_id']}, {"$set": face_acc}, upsert=False)
            # save new image
            face_acc_img_dir = os.path.join(self.face_img_dir, str(face_acc['_id']))
            if not os.path.exists(face_acc_img_dir):
                os.mkdir(face_acc_img_dir)
            new_log_img_path = os.path.join(face_acc_img_dir, log_img_name)
            cv2.imwrite(new_log_img_path, log_img)
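A likely entry point for Example #11, assuming dailyUpdate runs as a nightly job:

if __name__ == "__main__":
    updater = FaceUpdate()
    # fold yesterday's successful recognitions back into the face database
    updater.dailyUpdate(prev_days=1)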