def build_matcher(data_struct):
    matcher = FaissMatcher()
    embs = []
    labels = []
    for label in data_struct["folder_structure"]:
        for tracker in data_struct["folder_structure"][label]:
            for image_id in data_struct["folder_structure"][label][tracker]:
                labels.append(image_id)
                embs.append(data_struct["regdict"][image_id]["embedding"])

    # build matcher
    matcher.fit(embs, labels)
    return matcher
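# Usage sketch for build_matcher (illustrative only). The exact shape of
# data_struct is an assumption inferred from the lookups above: a
# 'folder_structure' mapping label -> tracker -> [image_id], plus a 'regdict'
# mapping image_id -> {'embedding': <vector>}; the ids and values below are
# made up.
#
# example_struct = {
#     'folder_structure': {
#         'person_A': {'tracker_0': ['imgid_001', 'imgid_002']},
#     },
#     'regdict': {
#         'imgid_001': {'embedding': [0.1] * 128},
#         'imgid_002': {'embedding': [0.2] * 128},
#     },
# }
# matcher = build_matcher(example_struct)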
def main(matcher_path, test_path):
    m_trackers_paths = glob.glob(matcher_path + '/*')
    t_trackers_paths = glob.glob(test_path + '/*')
    tracker_manager = TrackerManager('test')
    matcher = FaissMatcher()
    preprocessor = Preprocessor()
    align_preprocessor = Preprocessor(algs=align_and_crop)
    face_rec_graph_face = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph_face, model_path=Config.FACENET_DIR)
    detector = MTCNNDetector(face_rec_graph_face)

    # create matcher
    print('Creating matcher ...')
    for m_dir in m_trackers_paths:
        print('Processing ' + m_dir)
        face_id = m_dir.split('/')[-1]
        embs, labels = extract_embs(m_dir, preprocessor, face_extractor, None)
        face_id_labels = [face_id for i in range(len(labels))]
        matcher.update(embs, face_id_labels)

    # create trackers
    print('Creating trackers')
    for t_dir in t_trackers_paths:
        print('Processing ' + t_dir)
        embs, _ = extract_embs(t_dir, preprocessor, face_extractor, None)
        track_id = int(t_dir.split('/')[-1])

        # seed the tracker with the first embedding (the original popped the
        # last element here, despite the variable name)
        first_emb = embs.pop(0)
        face_info = FaceInfo(None, first_emb, None, None, None, None)
        tracker_manager.current_trackers[track_id] = Tracker(
            track_id, face_info, None)
        for emb in embs:
            face_info = FaceInfo(None, emb, None, None, None, None)
            tracker_manager.current_trackers[track_id].update(face_info, None)

    # test matching
    print('Test matching ...')
    for fid in tracker_manager.current_trackers:
        print('Processing: ' + str(fid))
        tops = tracker_manager.recognize_current_tracker(fid, matcher, None)
        print('Track_id {}, recognize: {}'.format(fid, tops))
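# Minimal sketch of the extract_embs helper that main() above relies on; the
# body is an assumption from its call sites (it should return parallel lists
# of embeddings and labels for every image in a tracker directory), not the
# repo's actual implementation.
def extract_embs_sketch(tracker_dir, preprocessor, face_extractor, _unused):
    embs = []
    labels = []
    for img_path in glob.glob(tracker_dir + '/*.jpg'):
        image = cv2.imread(img_path)  # BGR frame, as with the readers above
        preprocessed = preprocessor.process(image)
        emb, _ = face_extractor.extract_features(preprocessed)
        embs.append(emb)
        labels.append(os.path.basename(img_path))
    return embs, labels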
def generic_function(cam_url, queue_reader, area, face_extractor_model,
                     re_source, multi_thread):
    '''
    This is the main function
    '''
    print("Generic function")
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per face id
    tracking_dirs = glob.glob(Config.TRACKING_DIR + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1
    imageid_to_keyid = {}
    tracker_manager = TrackerManager(
        area,
        imageid_to_keyid=imageid_to_keyid,
        current_id=number_of_existing_trackers)

    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()

    global querying_top10_image_ids_queue

    # mongodb_faceinfo.remove({})
    # reg_dict = PickleUtils.read_pickle(Config.REG_IMAGE_FACE_DICT_FILE)
    # if reg_dict is not None:
    #     for fid in reg_dict:
    #         mongodb_faceinfo.insert_one({'image_id': fid, 'face_id': reg_dict[fid]})
    #     print('Saved regdict in mongodb as collection regdict')

    matcher = FaissMatcher()
    matcher.build(
        mongodb_faceinfo,
        imageid_to_keyid=imageid_to_keyid,
        use_image_id=True)
    svm_matcher = None
    if Config.Matcher.CLOSE_SET_SVM:
        svm_matcher = SVMMatcher()
        svm_matcher.build(mongodb_faceinfo)

    track_results = TrackerResultsDict()

    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    elif queue_reader is not None:
        frame_reader = RabbitFrameReader(rabbit_mq, queue_reader)
    elif args.anno_mode:
        frame_reader = URLFrameReader('./nothing.mp4', scale_factor=1)
    else:
        print('Empty Image Source')
        return -1

    if not args.anno_mode:
        video_out_fps, video_out_w, video_out_h = frame_reader.get_info()
        print(video_out_fps, video_out_w, video_out_h)
        bbox = [
            int(Config.Frame.ROI_CROP[0] * video_out_w),
            int(Config.Frame.ROI_CROP[1] * video_out_h),
            int(Config.Frame.ROI_CROP[2] * video_out_w),
            int(Config.Frame.ROI_CROP[3] * video_out_h)
        ]
        # bbox = [0, 0, video_out_w, video_out_h]

    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        video_out = VideoHandle(time.time(), video_out_fps, int(video_out_w),
                                int(video_out_h))

    # Turn on querying top 10 from the queue
    if Config.QUERY_TOP10_MODE:
        thread = threading.Thread(target=give_this_id_10_closest_ids)
        thread.daemon = True
        thread.start()

    frame_queue = []
    lock = threading.Lock()
    if multi_thread:
        is_on = [True]
        t = threading.Thread(
            target=get_frames,
            args=(frame_queue, frame_reader, re_source, cam_url, queue_reader,
                  lock, is_on))
        t.start()

    try:
        while True:
            ms_msg = rabbit_mq.receive_str(Config.Queues.MERGE)
            ms_flag = 'merge'
            if ms_msg is None:
                ms_msg = rabbit_mq.receive_str(Config.Queues.SPLIT)
                ms_flag = 'split'
            if ms_msg is not None:
                merge_anchor, merge_list = extract_info_from_json(ms_msg)
                while merge_list != []:
                    image_id1 = merge_list.pop()
                    split_merge_id(ms_flag, image_id1, merge_anchor, matcher,
                                   preprocessor, face_extractor,
                                   tracker_manager, mongodb_dashinfo,
                                   mongodb_faceinfo, mongodb_mslog)
                continue

            action_msg = rabbit_mq.receive_str(Config.Queues.ACTION)
            if action_msg is not None:
                return_dict = json.loads(action_msg)
                print('Receive: {}'.format(return_dict))
                if return_dict['actionType'] == 'getNearest':
                    lock.acquire()
                    querying_top10_image_ids_queue.append(return_dict['data'])
                    lock.release()
                    continue

            if args.anno_mode:
                print('Annotation Mode, waiting for new tasks ...')
                time.sleep(1)
                continue

            if multi_thread:
                if len(frame_queue) > 0:
                    lock.acquire()
                    frame = frame_queue.pop(0)
                    lock.release()
                else:
                    frame = None
            else:
                frame = frame_reader.next_frame()

            tracker_manager.update_trackers(frame)

            # Do this before check_and_recognize_tracker (sync the local
            # matcher with mongodb).
            # NOTE: the original passed an undefined 'self' as the first
            # argument here; tracker_manager is the likely intent.
            trackers_return_dict, recognized_results = update_recognition(
                tracker_manager, matcher, svm_matcher, mongodb_faceinfo)
            for tracker, matching_result_dict in recognized_results:
                handle_results(
                    tracker,
                    matching_result_dict,
                    imageid_to_keyid=imageid_to_keyid,
                    dump=False)

            # trackers_return_dict = tracker_manager.find_and_process_end_track(mongodb_faceinfo)
            track_results.merge(trackers_return_dict)
            tracker_manager.long_term_history.check_time(
                matcher, mongodb_faceinfo)

            if frame is None:
                print("Waiting for the new image")
                # if Config.Track.RECOGNIZE_FULL_TRACK:
                #     overtime_track_ids = tracker_manager.find_overtime_current_trackers(
                #         time_last=Config.Track.CURRENT_EXTRACR_TIMER - 5,
                #         find_unrecognized=True)
                #     for overtime_track_id in overtime_track_ids:
                #         checking_tracker, top_predicted_face_ids, matching_result_dict = \
                #             tracker_manager.check_and_recognize_tracker(
                #                 matcher, overtime_track_id,
                #                 mongodb_faceinfo, svm_matcher)
                #         handle_results(checking_tracker, matching_result_dict,
                #                        imageid_to_keyid=imageid_to_keyid,
                #                        dump=False)
                if re_source and not multi_thread:
                    print('Trying to connect the stream again ...')
                    if cam_url is not None:
                        frame_reader = URLFrameReader(cam_url, scale_factor=1)
                    elif queue_reader is not None:
                        frame_reader = RabbitFrameReader(
                            rabbit_mq, queue_reader)
                    else:
                        print('Empty Image Source')
                        return -1
                break

            print("Frame ID: %d" % frame_counter)

            # FIXME: 'video' is undefined in this scope in the original; this
            # rotation hook presumably keyed off the source video filename,
            # so it is kept here as a comment.
            # if "_rotate" in video:
            #     rotation = int(video.split("_")[-1].split(".")[0])  # rotate cw
            #     frame = rotate_image_90(frame, rotation)

            if Config.Track.TRACKING_VIDEO_OUT:
                video_out.tmp_video_out(frame)
            if Config.CALC_FPS:
                fps_counter = time.time()

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                # crop frame
                # frame = frame[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
                origin_bbs, points = detector.detect_face(frame)
                if len(origin_bbs) > 0:
                    origin_bbs = [origin_bb[:4] for origin_bb in origin_bbs]
                    display_and_padded_faces = [
                        CropperUtils.crop_display_face(frame, origin_bb)
                        for origin_bb in origin_bbs
                    ]
                    # cropped_faces = [CropperUtils.crop_face(frame, origin_bb)
                    #                  for origin_bb in origin_bbs]
                    preprocessed_images = [
                        preprocessor.process(
                            CropperUtils.crop_face(frame, origin_bb))
                        for origin_bb in origin_bbs
                    ]
                    embeddings_array, _ = \
                        face_extractor.extract_features_all_at_once(
                            preprocessed_images)
                    for i, origin_bb in enumerate(origin_bbs):
                        display_face, str_padded_bbox = \
                            display_and_padded_faces[i]
                        # cropped_face = CropperUtils.crop_face(frame, origin_bb)

                        # Calculate embedding
                        preprocessed_image = preprocessed_images[i]
                        # preprocessed_image = align_preprocessor.process(
                        #     frame, points[:, i], aligner, 160)
                        emb_array = np.asarray([embeddings_array[i]])

                        face_info = FaceInfo(
                            # origin_bb.tolist(),
                            # emb_array,
                            frame_counter,
                            origin_bb,
                            points[:, i]
                            # display_face,
                            # str_padded_bbox,
                            # points[:, i].tolist()
                        )
                        is_good_face = handle_filters(points[:, i],
                                                      coeff_extractor,
                                                      face_info,
                                                      preprocessed_image)
                        face_info.is_good = is_good_face
                        # TODO: refactor matching_detected_face_with_trackers
                        matched_track_id = tracker_manager.track(face_info)

                        if not face_info.is_good:
                            print('BAD FACE')
                            continue

                        # Update tracker_manager
                        tracker_manager.update(matched_track_id, frame,
                                               face_info)
                        checking_tracker = None
                        # if not Config.Track.RECOGNIZE_FULL_TRACK:
                        #     checking_tracker, top_predicted_face_ids, matching_result_dict = \
                        #         tracker_manager.check_and_recognize_tracker(
                        #             matcher, matched_track_id,
                        #             mongodb_faceinfo, svm_matcher)
                        #     handle_results(checking_tracker, matching_result_dict,
                        #                    imageid_to_keyid=imageid_to_keyid,
                        #                    dump=True)

            # if Config.Track.RECOGNIZE_FULL_TRACK:
            #     overtime_track_ids = tracker_manager.find_overtime_current_trackers(
            #         time_last=Config.Track.CURRENT_EXTRACR_TIMER - 5,
            #         find_unrecognized=True)
            #     for overtime_track_id in overtime_track_ids:
            #         checking_tracker, top_predicted_face_ids, matching_result_dict = \
            #             tracker_manager.check_and_recognize_tracker(
            #                 matcher, overtime_track_id, mongodb_faceinfo,
            #                 svm_matcher)
            #         handle_results(checking_tracker, matching_result_dict,
            #                        imageid_to_keyid=imageid_to_keyid,
            #                        dump=False)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)

        Config.Track.CURRENT_EXTRACR_TIMER = 0
        trackers_return_dict = tracker_manager.find_and_process_end_track(
            mongodb_faceinfo)
        Config.Track.HISTORY_CHECK_TIMER = 0
        Config.Track.HISTORY_EXTRACT_TIMER = 0
        tracker_manager.long_term_history.check_time(matcher, mongodb_faceinfo)

    except KeyboardInterrupt:
        if multi_thread:
            is_on[0] = False
            t.join()
        print('Keyboard Interrupt !!! Release All !!!')
        Config.Track.CURRENT_EXTRACR_TIMER = 0
        trackers_return_dict = tracker_manager.find_and_process_end_track(
            mongodb_faceinfo)
        Config.Track.HISTORY_CHECK_TIMER = 0
        Config.Track.HISTORY_EXTRACT_TIMER = 0
        tracker_manager.long_term_history.check_time(matcher, mongodb_faceinfo)
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)
            video_out.release()
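# Minimal sketch of the get_frames producer that generic_function starts in
# multi_thread mode. The body is an assumption based only on the argument
# list above (shared frame list + lock + is_on flag), not the repo's actual
# implementation.
def get_frames_sketch(frame_queue, frame_reader, re_source, cam_url,
                      queue_reader, lock, is_on):
    while is_on[0]:
        frame = frame_reader.next_frame()
        if frame is None:
            if re_source and cam_url is not None:
                # try to re-open the dropped stream
                frame_reader = URLFrameReader(cam_url, scale_factor=1)
            continue
        with lock:
            frame_queue.append(frame)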
def give_this_id_10_closest_ids():
    # init matcher
    with open('../data/top10querylog.txt', 'a') as f:
        f.write('TOP10 QUERY IS BEING IN PROCESS !!!\n\n')
    global querying_top10_image_ids_queue
    global mongodb_faceinfo
    global mongodb_dashinfo
    embs = []
    labels = []
    cursors = mongodb_dashinfo.find({})
    unique_labels = [cursor['represent_image_id'] for cursor in cursors]
    cursors = mongodb_faceinfo.find({'image_id': {'$in': unique_labels}})
    for cursor in cursors:
        embs.append(np.array(cursor['embedding']))
        labels.append(cursor['image_id'])
    nof_registered_image_ids = len(labels)
    matcher = FaissMatcher()
    matcher.fit(embs, labels)
    with open('../data/top10querylog.txt', 'a') as f:
        f.write('MATCHER BUILT!!!\n\n')

    while True:
        if nof_registered_image_ids < mongodb_dashinfo.find({}).count():
            nof_registered_image_ids = mongodb_dashinfo.find({}).count()
            print('[Query TOP10] Update new registered image_id ...')
            cursors = mongodb_dashinfo.find({
                'represent_image_id': {
                    '$nin': labels
                }
            })
            unique_labels = [
                cursor['represent_image_id'] for cursor in cursors
            ]
            cursors = mongodb_faceinfo.find({
                'image_id': {
                    '$in': unique_labels
                }
            })
            adding_embs = []
            adding_labels = []
            for cursor in cursors:
                adding_embs.append(np.array(cursor['embedding']))
                adding_labels.append(cursor['image_id'])
            old_embs = embs
            old_labels = labels
            embs = old_embs + adding_embs
            labels = old_labels + adding_labels
            matcher.update(adding_embs, adding_labels)

        if len(querying_top10_image_ids_queue) > 0:
            lock.acquire()
            queue_data = querying_top10_image_ids_queue.pop()
            lock.release()
            results = {}
            session_id = queue_data['sessionId']
            image_id = queue_data['imageId']
            print('[Query TOP10] image_id: ' + image_id)
            with open('../data/top10querylog.txt', 'a') as f:
                f.write('image_id: ' + image_id + '\n')

            cursors = mongodb_faceinfo.find({'image_id': image_id})
            if cursors.count() == 0:
                print('[Query TOP10] THIS QUERY IMAGE ID HAS YET TO REGISTER')
                with open('../data/top10querylog.txt', 'a') as f:
                    f.write('THIS QUERY IMAGE ID HAS YET TO REGISTER\n')
                face_id = mongodb_dashinfo.find({
                    'represent_image_id': image_id
                })[0]['face_id']
                unique_labels = [
                    cursor['represent_image_id']
                    for cursor in mongodb_dashinfo.find({
                        'face_id': face_id
                    })
                ]
                for label in unique_labels:
                    results[label] = '0'
            else:
                query_emb = cursors[0]['embedding']
                # NOTE: faiss expects a 2-D float32 query array; the original
                # passed a 1-D vector here, so reshape as
                # FindSimilarFaceThread does below.
                query = np.array(query_emb).astype('float32').reshape((1, -1))
                dists, inds = matcher._classifier.search(query, k=15)
                dists = np.squeeze(dists)
                inds = np.squeeze(inds)
                top_match_ids = [labels[idx] for idx in inds]
                for i, top_match_id in enumerate(top_match_ids):
                    if i < 11 and top_match_id != image_id:
                        results[top_match_id] = str(dists[i])
            msg_results = {
                'actionType': 'getNearest',
                'sessionId': session_id,
                'data': {
                    'images': results
                }
            }
            with open('../data/top10querylog.txt', 'a') as f:
                f.write('Result: \n{}\n\n'.format(results))
            print('[Query TOP10] Result: \n{}'.format(results))
            rabbit_mq.send_with_exchange(Config.Queues.ACTION_RESULT,
                                         session_id, json.dumps(msg_results))

            # These comments were for querying the tracker from the image ids
            # tracker:
            # query_track_id = int(image_id.split('_')[0])
            # query_embs = [cursor['embedding'] for cursor in
            #               mongodb_faceinfo.find({'track_id': query_track_id})]
            # for emb in query_embs:
            #     predict_id, _, min_dist = matcher.match(emb, return_min_dist=True)
            #     if not predict_id in predicted_dict:
            #         predicted_dict[predict_id] = []
            #     predicted_dict[predict_id].append(min_dist)
            # avg_predicted_dict = {pid: sum(predicted_dict[pid]) / float(len(predicted_dict[pid]))
            #                       for pid in predicted_dict}
            # sorted_predicted_ids = sorted(avg_predicted_dict.items(), key=lambda kv: kv[1])
            # with open('../data/query_top10_log.txt') as f:
            #     f.write('Query IMAGE_ID: ' + image_id + '\n')
            #     f.write('Results: {} \n\n'.format(sorted_predicted_ids))
            # str_results = []
            # for closest_id, dist in sorted_predicted_ids:
            #     str_results.append(Config.Rabbit.INTRA_SEP.join([closest_id, str(dist)]))
            # result_msg = Config.Rabbit.INTER_SEP.join(str_results)
        else:
            time.sleep(1)
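# Note on the FAISS query shape used above: a raw faiss Index.search expects
# a 2-D float32 array of shape (n_queries, dim), which is why the query is
# reshaped before searching (FindSimilarFaceThread below does the same with
# .reshape((-1, 128))). A standalone check, assuming 128-d embeddings as
# elsewhere in this code:
#
# import faiss
# import numpy as np
# index = faiss.IndexFlatL2(128)
# index.add(np.random.rand(100, 128).astype('float32'))
# query = np.random.rand(128).astype('float32').reshape((1, 128))
# dists, inds = index.search(query, 15)  # both results have shape (1, 15)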
from frame_reader import QueueFrameReader

# read config
if True:
    configs = []
    with open('../config.txt', 'r') as f:
        configs = f.readlines()
    configs = [txt_config.strip('\n') for txt_config in configs]
    Config.DEMO_FOR = configs[0]
    Config.Rabbit.IP_ADDRESS = configs[1]

face_rec_graph = FaceGraph()
face_extractor = FacenetExtractor(
    face_rec_graph, model_path=Config.FACENET_DIR)
detector = MTCNNDetector(face_rec_graph)
preprocessor = Preprocessor()
matcher = FaissMatcher()
matcher._match_case = 'TCH'
matcher.build(Config.REG_IMAGE_FACE_DICT_FILE)
rb = RabbitMQ()

frame_readers = dict()
register_command = dict()  # {session_id: [[register_name, video_path]]}
removed_sessions = Queue()
sent_msg_queue = Queue()

start_time = time.time()
while True:
    # if time.time() - start_time >= 10.0:
    #     try:
    #         while True:
    #             rm_id = removed_sessions.get(False)
    pass  # (snippet truncated in the original)
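# Illustrative ../config.txt for the reader block above: line 1 becomes
# Config.DEMO_FOR and line 2 becomes Config.Rabbit.IP_ADDRESS. The values
# below are placeholders ('TCH' borrowed from matcher._match_case), not from
# the source:
#
#   TCH
#   192.168.1.10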
def general_process(lock_id, detector, preprocessor, face_extractor,
                    blynk_locker):
    '''
    INPUT: lock_id
    '''
    # Get locker information
    # lock_id = 'query from mongo'
    locker_info = mongodb_lockersinfo.find({'lock_id': lock_id})[0]
    this_locker = mongodb_lockers.find({'lock_id': lock_id})[0]
    cam_url = locker_info['cam_url']
    status = this_locker['status']
    blynk_locker.processing(status)

    # init face info
    mongodb_faceinfo = mongodb_db[str(lock_id)]

    # Variables for tracking faces
    frame_counter = 0
    start_time = time.time()
    acceptable_spoofing = 0

    # Variables holding the correlation trackers and the name per face id
    tracking_folder = os.path.join(Config.TRACKING_DIR, str(lock_id))
    create_if_not_exist(tracking_folder)
    tracking_dirs = glob.glob(tracking_folder + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1
    tracker_manager = TrackerManager(
        'LOCKID' + str(lock_id), current_id=number_of_existing_trackers)
    frame_reader = URLFrameReader(cam_url, scale_factor=1)

    matcher = FaissMatcher()
    if status == 'locked':
        embs = []
        labels = []
        cursors = mongodb_faceinfo.find({
            'face_id': this_locker['lock_face_id']
        })
        for cursor in cursors:
            embs.append(np.array(cursor['embedding']))
            labels.append(cursor['image_id'])
        nof_registered_image_ids = len(labels)
        matcher.fit(embs, labels)

    while True:
        # in case the jerk hits the button
        if time.time() - start_time > 4:
            with open('../data/locker_{}_log.txt'.format(lock_id), 'a') as f:
                f.write('[LOCKER {}] OUT OF TIME! \n\n'.format(lock_id))
            frame_reader.release()
            blynk_locker.stop_processing(status)
            return -1

        frame = frame_reader.next_frame()
        if frame is None:
            print('Invalid Video Source')
            break
        fps_counter = time.time()
        # cv2.imshow('Locker {}'.format(lock_id), frame)
        # cv2.waitKey(1)

        tracker_manager.update_trackers(frame)

        if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
            origin_bbs, points = detector.detect_face(frame)
            for i, in_origin_bb in enumerate(origin_bbs):
                origin_bb = in_origin_bb[:-1]

                display_face, str_padded_bbox = CropperUtils.crop_display_face(
                    frame, origin_bb)
                cropped_face = CropperUtils.crop_face(frame, origin_bb)

                # is_spoofing = spoofing_detector.is_face_spoofing(cropped_face)
                # if is_spoofing:
                #     acceptable_spoofing += 1
                #     with open('../data/spoofing_log.txt', 'a') as f:
                #         f.write('Spoofing Detected at Locker {}: {}\n'.format(
                #             lock_id, is_spoofing))
                # if acceptable_spoofing > 5:
                #     with open('../data/locker_{}_log.txt'.format(lock_id), 'a') as f:
                #         f.write('[LOCKER {}] STOP PROCESSING. '
                #                 'SPOOFING DETECTED!\n'.format(lock_id))
                #     frame_reader.release()
                #     blynk_locker.stop_processing(status)
                #     return -1

                # Calculate embedding
                preprocessed_image = preprocessor.process(cropped_face)
                # preprocessed_image = align_preprocessor.process(
                #     frame, points[:, i], aligner, 160)
                emb_array, _ = face_extractor.extract_features(
                    preprocessed_image)

                face_info = FaceInfo(origin_bb.tolist(), emb_array,
                                     frame_counter, display_face,
                                     str_padded_bbox, points[:, i].tolist())
                is_good_face = handle_filters(points[:, i], coeff_extractor,
                                              face_info, preprocessed_image)
                face_info.is_good = is_good_face
                # TODO: refactor matching_detected_face_with_trackers
                matched_track_id = tracker_manager.track(face_info)

                if not face_info.is_good:
                    print('BAD FACE')
                    continue

                # Update tracker_manager
                tracker_manager.update(matched_track_id, frame, face_info)

                checking_tracker = None
                checking_tracker, top_predicted_face_ids, matching_result_dict = \
                    tracker_manager.check_and_recognize_tracker(
                        matcher, matched_track_id, mongodb_faceinfo, None)
                # handle_results(checking_tracker, matching_result_dict)

                if checking_tracker is not None:
                    dumped_images = checking_tracker.dump_images(
                        mongodb_faceinfo,
                        add_new=True,
                        trackingfolder=tracking_folder)
                    checking_tracker.represent_image_id = dumped_images[0]
                    face_url = os.path.join(
                        Config.SEND_RBMQ_HTTP, str(lock_id),
                        str(checking_tracker.track_id),
                        checking_tracker.represent_image_id)
                    face_url += '.jpg'

                    if status == 'available':
                        # Save locker, sign up the face
                        mongodb_lockers.remove({'lock_id': lock_id})
                        # note: the 'unlock_timestap' key is kept as spelled
                        # in the original schema
                        msg_dict = {
                            'lock_id': lock_id,
                            'status': 'locked',
                            'lock_face_url': face_url,
                            'lock_face_id': checking_tracker.face_id,
                            'lock_timestamp': time.time(),
                            'unlock_face_url': None,
                            'unlock_face_id': None,
                            'unlock_timestap': None
                        }
                        mongodb_lockers.insert_one(msg_dict)

                        # update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)

                        with open('../data/locker_{}_log.txt'.format(lock_id),
                                  'a') as f:
                            f.write('[LOCKER {}] REGISTERED FACE AS {}. '
                                    'LOCKED\n'.format(
                                        lock_id, checking_tracker.face_id))
                        blynk_locker.stop_processing('locked')

                    elif status == 'locked':
                        # Release the locker, face verification
                        # update locker
                        msg_dict = mongodb_lockers.find(
                            {'lock_id': lock_id},
                            projection={"_id": False})[0]
                        msg_dict.update({
                            'unlock_face': face_url,
                            'unlock_timestamp': time.time()
                        })
                        if this_locker['lock_face_id'] == checking_tracker.face_id:
                            print('UNLOCK!')
                            blynk_locker.stop_processing('available')
                            mongodb_lockers.remove({'lock_id': lock_id})
                            mongodb_lockers.insert_one({
                                'lock_id': lock_id,
                                'status': 'available',
                                'lock_face_id': None,
                                'lock_face_url': None,
                                'lock_timestamp': None,
                                'unlock_face_id': None,
                                'unlock_face_url': None,
                                'unlock_timestap': None
                            })
                            with open(
                                    '../data/locker_{}_log.txt'.format(lock_id),
                                    'a') as f:
                                f.write('[LOCKER {}] MATCHED WITH FACE ID {}. '
                                        'UNLOCKED. THIS LOCKER IS AVAILABLE '
                                        'NOW!\n'.format(
                                            lock_id,
                                            checking_tracker.face_id))
                        else:
                            print('NOT MATCH')
                            blynk_locker.stop_processing('locked')
                            with open(
                                    '../data/locker_{}_log.txt'.format(lock_id),
                                    'a') as f:
                                f.write('[LOCKER {}] NOT MATCH. '
                                        'PLEASE TRY AGAIN!\n'.format(lock_id))

                        # update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)

                    frame_reader.release()
                    return 1

        tracker_manager.find_and_process_end_track(mongodb_faceinfo)
        frame_counter += 1
        print("FPS: %f" % (1 / (time.time() - fps_counter)))
class FindSimilarFaceThread(Thread):
    '''
    Find similar faces from dashboard
    '''

    def __init__(self, **args):
        self.nrof_closest_faces = args.get('nrof_closest_faces', 10)
        self.database = args.get('database')
        self.rabbit_mq = args.get('rabbit_mq')
        self.event = threading.Event()
        self.setup_matcher()
        super(FindSimilarFaceThread, self).__init__()

    def join(self, timeout=None):
        print('FindSimilarFaceThread joined')
        self.event.set()
        super(FindSimilarFaceThread, self).join()

    def setup_matcher(self):
        with open('../data/top10querylog.txt', 'a') as f:
            f.write('TOP10 QUERY IS BEING IN PROCESS !!!\n\n')
        '''
        self.embs = []
        self.labels = []
        cursors = self.mongo.mongodb_dashinfo.find({})
        unique_labels = [cursor['represent_image_id'] for cursor in cursors]
        cursors = self.mongo.mongodb_faceinfo.find(
            {'image_id': {'$in': unique_labels}})
        for cursor in cursors:
            self.embs.append(np.array(cursor['embedding']))
            self.labels.append(cursor['image_id'])
        self.nof_registered_image_ids = len(self.labels)
        '''
        self.labels, self.embs = self.database.get_labels_and_embs_dashboard()
        self.matcher = FaissMatcher()
        self.matcher.fit(self.embs, self.labels)
        with open('../data/top10querylog.txt', 'a') as f:
            f.write('MATCHER BUILT!!!\n\n')

    def run(self):
        while not self.event.is_set():
            # first update: check for new faces in the dashboard
            '''
            if self.nof_registered_image_ids < self.mongo.mongodb_dashinfo.find({}).count():
                self.nof_registered_image_ids = self.mongo.mongodb_dashinfo.find({}).count()
                print('[Query TOP10] Update new registered image_id ...')
                cursors = self.mongo.mongodb_dashinfo.find(
                    {'represent_image_id': {'$nin': self.labels}})
                unique_labels = [cursor['represent_image_id'] for cursor in cursors]
                cursors = self.mongo.mongodb_faceinfo.find(
                    {'image_id': {'$in': unique_labels}})
                adding_embs = []
                adding_labels = []
                for cursor in cursors:
                    adding_embs.append(np.array(cursor['embedding']))
                    adding_labels.append(cursor['image_id'])
            '''
            adding_labels, adding_embs = \
                self.database.get_labels_and_embs_dashboard(self.labels)
            if len(adding_labels) > 0:
                old_embs = self.embs
                old_labels = self.labels
                self.embs = old_embs + adding_embs
                self.labels = old_labels + adding_labels
                print('Find similar', len(adding_labels))
                self.matcher.update(adding_embs, adding_labels)

            # get a new query from the queue (why not just trigger?)
            action_msg = self.rabbit_mq.receive_str(Config.Queues.ACTION)
            if action_msg is not None:
                return_dict = json.loads(action_msg)
                print('Receive: {}'.format(return_dict))
                if return_dict['actionType'] == 'getNearest':
                    data = return_dict['data']
                    results = {}
                    session_id = data['sessionId']
                    image_id = data['imageId']
                    print('[Query TOP10] image_id: ' + image_id)
                    with open('../data/top10querylog.txt', 'a') as f:
                        f.write('image_id: ' + image_id + '\n')

                    cursors = self.database.mongodb_faceinfo.find(
                        {'image_id': image_id})
                    if cursors.count() == 0:
                        print('[Query TOP10] THIS QUERY IMAGE ID HAS YET TO '
                              'REGISTER')
                        with open('../data/top10querylog.txt', 'a') as f:
                            f.write(
                                'THIS QUERY IMAGE ID HAS YET TO REGISTER\n')
                        face_id = self.database.mongodb_dashinfo.find(
                            {'represent_image_id': image_id})[0]['face_id']
                        unique_labels = [
                            cursor['represent_image_id']
                            for cursor in self.database.mongodb_dashinfo.find(
                                {'face_id': face_id})
                        ]
                        for label in unique_labels:
                            results[label] = '0'
                    else:
                        query_emb = cursors[0]['embedding']
                        embs = np.array(query_emb).astype('float32').reshape(
                            (-1, 128))
                        dists, inds = self.matcher._classifier.search(
                            embs, k=15)
                        dists = np.squeeze(dists)
                        inds = np.squeeze(inds)
                        top_match_ids = [self.labels[idx] for idx in inds]
                        for i, top_match_id in enumerate(top_match_ids):
                            if i < 11 and top_match_id != image_id:
                                results[top_match_id] = str(dists[i])
                    msg_results = {
                        'actionType': 'getNearest',
                        'sessionId': session_id,
                        'data': {
                            'images': results
                        }
                    }
                    with open('../data/top10querylog.txt', 'a') as f:
                        f.write('Result: \n{}\n\n'.format(results))
                    print('[Query TOP10] Result: \n{}'.format(results))
                    self.rabbit_mq.send_with_exchange(
                        Config.Queues.ACTION_RESULT, session_id,
                        json.dumps(msg_results))
            else:
                time.sleep(1)
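# Hypothetical wiring for FindSimilarFaceThread, assuming a database object
# exposing get_labels_and_embs_dashboard() and a rabbit_mq wrapper as used
# above; the variable names are illustrative.
#
# finder = FindSimilarFaceThread(
#     nrof_closest_faces=10, database=database, rabbit_mq=rabbit_mq)
# finder.start()   # run() polls for 'getNearest' actions in its own thread
# ...
# finder.join()    # sets the event so run() exits its loop cleanly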
def simulate_tracking(root_folder):
    Config.Track.FACE_TRACK_IMAGES_OUT = True
    Config.Track.SEND_FIRST_STEP_RECOG_API = False
    Config.Track.MIN_MATCH_DISTACE_OUT = True
    Config.Track.CURRENT_EXTRACR_TIMER = 5

    # Load Face Extractor
    face_rec_graph = FaceGraph()
    face_rec_graph_coeff = FaceGraph()
    face_extractor = FacenetExtractor(
        face_rec_graph, model_path=Config.FACENET_DIR)
    coeff_extractor = FacenetExtractor(
        face_rec_graph_coeff, model_path=Config.COEFF_DIR)

    # Create an empty matcher (the original comment said KDTreeMatcher, but a
    # FaissMatcher is built here)
    matcher = FaissMatcher()
    matcher._match_case = 'TCH'

    # Preprocessor
    preprocessor = Preprocessor()

    # Fake rabbit mq
    rabbit_mq = FakeMQ()

    # Clean up
    clear_tracking_folder()
    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()

    # Setup result lists
    list_of_trackers = TrackersList()
    track_results = TrackerResultsDict()
    predict_dict = {}
    confirmed_ids_dict = {}

    sim_detections = gen_images_with_time(root_folder)
    for detection in sim_detections:
        frame = create_fake_frame(detection)
        sleep(0.05)

        trackers_return_dict, predict_trackers_dict = \
            list_of_trackers.check_delete_trackers(matcher, rabbit_mq)
        track_results.update_two_dict(trackers_return_dict)
        predict_dict.update(predict_trackers_dict)
        confirmed_ids_dict = list_of_trackers.trackers_history.confirm_id(
            confirmed_ids_dict)
        list_of_trackers.trackers_history.check_time(matcher)
        list_of_trackers.update_dlib_trackers(frame)

        facial_quality = 1

        # Crop face for feature extraction
        origin_bb = detection[2]
        display_face, padded_bbox = CropperUtils.crop_display_face(
            frame, origin_bb)
        cropped_face = CropperUtils.crop_face(frame, origin_bb)
        bbox_str = '_'.join(np.array(origin_bb, dtype=np.unicode).tolist())

        # Calculate embedding
        preprocessed_image = preprocessor.process(cropped_face)
        emb_array, _ = face_extractor.extract_features(preprocessed_image)
        _, coeff = coeff_extractor.extract_features(preprocessed_image)
        if coeff < 0.15:
            img_path = '../data/notenoughcoeff/{}_{}_{}.jpg'.format(
                detection, bbox_str, coeff)
            cv2.imwrite(img_path,
                        cv2.cvtColor(display_face, cv2.COLOR_BGR2RGB))
            facial_quality = -1
        else:
            with open('../data/coeff_log.txt', 'a') as f:
                f.write('{}_{}_{}, coeff: {}\n'.format(
                    bbox_str, detection[0], padded_bbox, coeff))

        matched_fid = list_of_trackers.matching_face_with_trackers(
            frame, detection[0], origin_bb, emb_array, facial_quality)
        if facial_quality == -1 or coeff < 0.15:
            continue

        list_of_trackers.update_trackers_list(
            matched_fid, time.time(), origin_bb, display_face, emb_array, 0,
            'VVT', 1, detection[0], padded_bbox, matcher, rabbit_mq)

        list_of_trackers.check_recognize_tracker(matcher, rabbit_mq,
                                                 matched_fid)

    sleep(6)
    list_of_trackers.check_delete_trackers(matcher, rabbit_mq)
def generic_function(cam_url, queue_reader, area, re_source, use_frame_queue):
    '''
    This is the main function
    '''
    global rabbit_mq
    print("Generic function")
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()

    if Config.Mode.CALC_FPS:
        start_time = time.time()

    if cam_url is not None:
        frame_reader = URLFrameReader(
            cam_url, scale_factor=1, re_source=re_source)
    elif queue_reader is not None:
        frame_reader = RabbitFrameReader(rabbit_mq, queue_reader)
    else:
        print('Empty Image Source')
        return -1

    if use_frame_queue:
        frame_src = FrameQueue(
            frame_reader, max_queue_size=Config.Frame.FRAME_QUEUE_SIZE)
        frame_src.start()
    else:
        frame_src = frame_reader

    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        # FIXME: video_out_fps, video_out_w and video_out_h are never set in
        # this version; they would have to come from frame_reader.get_info()
        video_out = VideoHandle(time.time(), video_out_fps, int(video_out_w),
                                int(video_out_h))

    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per face id
    tracking_dirs = os.listdir(Config.Dir.TRACKING_DIR)
    if tracking_dirs == []:
        current_tracker_id = 0
    else:
        list_of_trackid = [
            int(tracking_dir) for tracking_dir in tracking_dirs
        ]
        current_tracker_id = max(list_of_trackid) + 1

    imageid_to_keyid = {}
    matcher = FaissMatcher()
    matcher.build(
        database, imageid_to_keyid=imageid_to_keyid, use_image_id=True)

    tracker_manager = TrackerManager(
        area,
        matcher,
        database.mongodb_faceinfo,
        imageid_to_keyid=imageid_to_keyid,
        current_id=current_tracker_id)

    try:
        while True:
            frame = frame_src.next_frame()
            if Config.Mode.CALC_FPS:
                fps_counter = time.time()
            if frame is None:
                print("Waiting for the new image")
                tracker_manager.update(rabbit_mq)
                time.sleep(1)
                continue

            # track by kcf
            tracker_manager.update_trackers(frame)
            tracker_manager.update(rabbit_mq)

            origin_bbs, points = TensorflowAdapter.detect_face(frame)
            if len(origin_bbs) > 0:
                origin_bbs = [origin_bb[:4] for origin_bb in origin_bbs]
                embeddings_array = [None] * len(origin_bbs)
                tracker_manager.process_new_detections(
                    frame,
                    origin_bbs,
                    points,
                    embeddings_array,
                    frame_id=frame_counter)

            frame_counter += 1
            if Config.Mode.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

        # TODO: this line never runs (the loop above only exits via an
        # exception)
        tracker_manager.update(rabbit_mq)
    except KeyboardInterrupt:
        if use_frame_queue:
            frame_src.stop()
        print('Keyboard Interrupt !!! Release All !!!')
        tracker_manager.update(rabbit_mq)
        frame_src.release()
        if Config.Mode.CALC_FPS:  # the original checked Config.CALC_FPS here
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            # FIXME: track_results is never defined in this version of the
            # function
            video_out.write_track_video(track_results.tracker_results_dict)
            video_out.release()
    else:
        raise Exception('Error')
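# Minimal sketch of the FrameQueue used above; this is an assumption from its
# call sites (start()/stop() manage a reader thread, next_frame() pops the
# oldest buffered frame or returns None), not the repo's actual class.
import queue
import threading

class FrameQueueSketch:
    def __init__(self, frame_reader, max_queue_size=30):
        self._reader = frame_reader
        self._frames = queue.Queue(maxsize=max_queue_size)
        self._running = False
        self._thread = None

    def _pump(self):
        # producer loop: buffer frames as fast as the reader yields them
        while self._running:
            frame = self._reader.next_frame()
            if frame is not None:
                try:
                    self._frames.put(frame, timeout=1)
                except queue.Full:
                    pass  # drop frames when the consumer falls behind

    def start(self):
        self._running = True
        self._thread = threading.Thread(target=self._pump, daemon=True)
        self._thread.start()

    def next_frame(self):
        try:
            return self._frames.get_nowait()
        except queue.Empty:
            return None

    def stop(self):
        self._running = False
        if self._thread is not None:
            self._thread.join()

    def release(self):
        self.stop()
        self._reader.release()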