def test_end_of_video(self):
    frame_reader = URLFrameReader(
        cam_url='%s//data/video/test-vin.mp4' % ROOT)
    for i in range(76):
        self.assertTrue(frame_reader.has_next())
        frame_reader.next_frame()
    self.assertFalse(frame_reader.has_next())

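def test_read_all_frames(self):
    # Hedged companion sketch: drains the reader with the same
    # has_next()/next_frame() API exercised above; the 76-frame count for
    # test-vin.mp4 is taken from test_end_of_video, nothing else is assumed.
    frame_reader = URLFrameReader(
        cam_url='%s//data/video/test-vin.mp4' % ROOT)
    frame_count = 0
    while frame_reader.has_next():
        frame_reader.next_frame()
        frame_count += 1
    self.assertEqual(frame_count, 76)
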
def test_function(cam_url):
    # face_rec_graph = FaceGraph()
    # detector = MTCNNDetector(face_rec_graph)
    # estimator = HeadPoseEstimator(model_prefix='../models/cpt', ctx=mx.cpu())
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    else:
        return -1
    while True:
        frame = frame_reader.next_frame()
        if frame is None:
            print('Frame is None...')
            time.sleep(5)
            continue
        # origin_bbs, points = detector.detect_face(frame)
        # for i, origin_bb in enumerate(origin_bbs):
        #     cropped_face = CropperUtils.crop_face(frame, origin_bb)
        #     yaw = FaceAngleUtils.calc_angle(points[:, i])
        #     pitch = FaceAngleUtils.calc_face_pitch(points[:, i])
        #     print(cropped_face.shape)
        #     # resize_face = np.resize(cropped_face, (64, 64, 3))
        #     frame = FaceAngleUtils.plot_points(frame, points[:, i])
        #     # print('pitch-yaw angle of test1: {}'.format(estimator.predict(resize_face)))
        #     print('pitch-yaw angle: {}, {}'.format(pitch, yaw))
        #     # print('pitch-yaw angle: {}'.format(estimator.crop_and_predict(frame, [points[:, i]])))
        cv2.imshow('img', frame)
        cv2.waitKey(1)

def occlusion_detection_video(video_path, detector):
    frame_reader = URLFrameReader(video_path, scale_factor=1)
    frames_per_state = 4
    state_correct = 0
    current_frame = 0
    # Opening phase
    try:
        for i in range(frames_per_state):
            current_frame += 1
            frame = frame_reader.next_frame()
            detected_result = detector.detect(frame)
            frame_label = process_result(detected_result)
            if frame_label == NO_OCCLUSION:
                state_correct += 1
            # fps = "{0}/{1}".format(current_frame, frames_per_state)
            # put_text_on_image(frame, fps, BLUE, "top")
            # cv2.imshow('frame', frame)
        state_validation = state_correct >= 1
        state_correct = 0
        current_frame = 0

        # Realtime phase
        while frame_reader.has_next():
            result_board = 255 * np.ones((400, 400, 3))
            frame = frame_reader.next_frame()
            current_frame += 1
            show_information(frame, current_frame, frames_per_state,
                             state_validation)
            detected_result = detector.detect(frame)
            frame_label = process_result(detected_result)
            if frame_label == NO_OCCLUSION:
                state_correct += 1
            if current_frame >= frames_per_state:
                state_validation = state_correct >= 1
                current_frame = 0
                state_correct = 0
            display_result_board(result_board, detected_result)
            cv2.imshow('frame', frame)
            cv2.imshow('result', result_board)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # cap.release()
        cv2.destroyAllWindows()

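# Hedged usage sketch for occlusion_detection_video. Only detector.detect(frame)
# is exercised above; the OcclusionDetector class name and the video path below
# are hypothetical placeholders, not confirmed by this repo.
if __name__ == '__main__':
    detector = OcclusionDetector()  # hypothetical detector exposing detect()
    occlusion_detection_video('../data/video/test.mp4', detector)
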
def setUp(self):
    frame_reader = URLFrameReader('')
    self.frame_shape = (480, 720, 3)
    sample_frame = np.zeros(self.frame_shape)
    frame_reader.next_frame = mock.MagicMock(return_value=sample_frame)
    self.lock = threading.Lock()
    self.max_queue_size = 10
    self.max_frame_on_disk = 10
    self.frame_queue = FrameQueue(
        frame_reader,
        self.lock,
        max_queue_size=self.max_queue_size,
        max_frame_on_disk=self.max_frame_on_disk)
    self.frame_queue.start()

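def tearDown(self):
    # Hedged sketch of a matching teardown for the fixture above: the queue
    # worker is started in setUp, so it should be shut down between tests.
    # The stop() method name is an assumption, not confirmed by this snippet.
    self.frame_queue.stop()
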
def generate_video_sample(cam_url, area):
    '''generating'''
    print('Generating... ')
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    frame_sample = {}

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                continue
            frame_sample[frame_counter] = FrameSample()
            frame_sample[frame_counter].read_image = frame
            print("Frame ID: %d" % frame_counter)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                frame_sample[frame_counter].origin_bbs = origin_bbs
                frame_sample[frame_counter].points = points
                for _, origin_bb in enumerate(origin_bbs):
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)
                    frame_sample[frame_counter].embs.append(emb_array)

            frame_counter += 1
    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        print('Saved this video sample as ../session/db/sample.pkl')
        PickleUtils.save_pickle('../session/db/sample.pkl', frame_sample)

def main(cam_url, recording_area):
    rb = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                  (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    detector = MTCNNDetector(FaceGraph())
    frame_reader = URLFrameReader(cam_url)
    edit_image = utils.CropperUtils()
    face_angle = utils.FaceAngleUtils()
    feature_extractor = FacenetExtractor(FaceGraph())
    pre_process = Preprocessor(whitening)
    while frame_reader.has_next():
        embedding_images = []
        embedding_vectors = []
        display_images = []
        display_image_bounding_boxes = []
        frame = frame_reader.next_frame()
        bounding_boxes, points = detector.detect_face(frame)
        for index, bounding_box in enumerate(bounding_boxes):
            if face_angle.is_acceptable_angle(points[:, index]):
                embedding_image = edit_image.crop_face(frame, bounding_box)
                embedding_images.append(embedding_image)
                display_image, display_image_bounding_box = \
                    edit_image.crop_display_face(frame, bounding_box)
                display_images.append(display_image)
                display_image_bounding_boxes.append(display_image_bounding_box)
                whitened_image = pre_process.process(embedding_image)
                embedding_vector, coeff = feature_extractor.extract_features(
                    whitened_image)
                embedding_vectors.append(embedding_vector)
        if len(embedding_vectors) > 0:
            rb.send_multi_embedding_message(display_images, embedding_vectors,
                                            recording_area, time.time(),
                                            display_image_bounding_boxes,
                                            rb.SEND_QUEUE_WORKER)
        else:
            print("No Face Detected")

def rotate_video(original_path, by_landmark=False):
    restructured_path = original_path + "_restructured"
    create_if_not_exist(restructured_path)
    videos = [
        os.path.join(original_path, id) for id in os.listdir(original_path)
        if not os.path.isdir(os.path.join(original_path, id))
    ]
    for video in videos:
        frame_reader = URLFrameReader(video, scale_factor=1)
        video_name = video.split("/")[-1].split(".")[0]
        video_type = video.split("/")[-1].split(".")[1]
        is_rotate = False
        if by_landmark:
            while True:
                frame = frame_reader.next_frame()
                if frame is None:
                    break
                rects, landmarks = detector.detect_face(frame)
                if len(rects) > 0:
                    rotate_angle = FaceAngleUtils.calc_face_rotate_angle(
                        landmarks[:, 0])
                    print("Points: " + str(landmarks[:, 0]) +
                          ", rotate_angle: " + str(rotate_angle))
                    if rotate_angle > 30:
                        video_name += "_rotate"
                    break
        else:
            # Probe the rotation tag from the video metadata via ffmpeg
            cmd = 'ffmpeg -i %s' % video
            p = subprocess.Popen(
                cmd.split(" "), stderr=subprocess.PIPE, close_fds=True)
            stdout, stderr = p.communicate()
            reo_rotation = re.compile(rb'rotate\s+:\s(?P<rotation>.*)')
            match_rotation = reo_rotation.search(stderr)
            if match_rotation is not None and len(match_rotation.groups()) > 0:
                rotation = match_rotation.groups()[0]
                if int(rotation) > 0:
                    video_name += "_rotate_" + str(int(rotation))
        n_video_path = os.path.join(restructured_path,
                                    video_name + "." + video_type)
        copy(video, n_video_path)
        print(video_name)

def generic_function(cam_url, queue_reader, area, face_extractor_model,
                     re_source, multi_thread):
    '''
    This is main function
    '''
    print("Generic function")
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    tracking_dirs = glob.glob(Config.TRACKING_DIR + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1
    imageid_to_keyid = {}
    tracker_manager = TrackerManager(
        area,
        imageid_to_keyid=imageid_to_keyid,
        current_id=number_of_existing_trackers)
    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()
    global querying_top10_image_ids_queue

    # mongodb_faceinfo.remove({})
    # reg_dict = PickleUtils.read_pickle(Config.REG_IMAGE_FACE_DICT_FILE)
    # if reg_dict is not None:
    #     for fid in reg_dict:
    #         mongodb_faceinfo.insert_one({'image_id': fid, 'face_id': reg_dict[fid]})
    #     print('Saved regdict in mongodb as collection regdict')

    matcher = FaissMatcher()
    matcher.build(
        mongodb_faceinfo, imageid_to_keyid=imageid_to_keyid, use_image_id=True)
    svm_matcher = None
    if Config.Matcher.CLOSE_SET_SVM:
        svm_matcher = SVMMatcher()
        svm_matcher.build(mongodb_faceinfo)

    track_results = TrackerResultsDict()
    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    elif queue_reader is not None:
        frame_reader = RabbitFrameReader(rabbit_mq, queue_reader)
    elif args.anno_mode:
        frame_reader = URLFrameReader('./nothing.mp4', scale_factor=1)
    else:
        print('Empty Image Source')
        return -1

    if not args.anno_mode:
        video_out_fps, video_out_w, video_out_h = frame_reader.get_info()
        print(video_out_fps, video_out_w, video_out_h)
        bbox = [
            int(Config.Frame.ROI_CROP[0] * video_out_w),
            int(Config.Frame.ROI_CROP[1] * video_out_h),
            int(Config.Frame.ROI_CROP[2] * video_out_w),
            int(Config.Frame.ROI_CROP[3] * video_out_h)
        ]
        # bbox = [0, 0, video_out_w, video_out_h]
    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        video_out = VideoHandle(time.time(), video_out_fps, int(video_out_w),
                                int(video_out_h))

    # Turn on querying top 10 from queue
    if Config.QUERY_TOP10_MODE:
        thread = threading.Thread(target=give_this_id_10_closest_ids)
        thread.daemon = True
        thread.start()

    frame_queue = []
    lock = threading.Lock()
    if multi_thread:
        is_on = [True]
        t = threading.Thread(
            target=get_frames,
            args=(frame_queue, frame_reader, re_source, cam_url, queue_reader,
                  lock, is_on))
        t.start()

    try:
        while True:
            ms_msg = rabbit_mq.receive_str(Config.Queues.MERGE)
            ms_flag = 'merge'
            if ms_msg is None:
                ms_msg = rabbit_mq.receive_str(Config.Queues.SPLIT)
                ms_flag = 'split'
            if ms_msg is not None:
                merge_anchor, merge_list = extract_info_from_json(ms_msg)
                while merge_list != []:
                    image_id1 = merge_list.pop()
                    split_merge_id(ms_flag, image_id1, merge_anchor, matcher,
                                   preprocessor, face_extractor,
                                   tracker_manager, mongodb_dashinfo,
                                   mongodb_faceinfo, mongodb_mslog)
                continue
            action_msg = rabbit_mq.receive_str(Config.Queues.ACTION)
            if action_msg is not None:
                return_dict = json.loads(action_msg)
                print('Receive: {}'.format(return_dict))
                if return_dict['actionType'] == 'getNearest':
                    lock.acquire()
                    querying_top10_image_ids_queue.append(return_dict['data'])
                    lock.release()
                    continue
            if args.anno_mode:
                print('Annotation Mode, waiting for new tasks ...')
                time.sleep(1)
                continue
            if multi_thread:
                if len(frame_queue) > 0:
                    lock.acquire()
                    frame = frame_queue.pop(0)
                    lock.release()
                else:
                    frame = None
            else:
                frame = frame_reader.next_frame()

            tracker_manager.update_trackers(frame)

            # Do this before check_and_recognize tracker
            # (sync local matcher vs mongodb)
            trackers_return_dict, recognized_results = update_recognition(
                self, matcher, svm_matcher, mongodb_faceinfo)
            for tracker, matching_result_dict in recognized_results:
                handle_results(tracker, matching_result_dict,
                               imageid_to_keyid=imageid_to_keyid, dump=False)

            # trackers_return_dict = tracker_manager.find_and_process_end_track(mongodb_faceinfo)
            track_results.merge(trackers_return_dict)
            tracker_manager.long_term_history.check_time(
                matcher, mongodb_faceinfo)

            if frame is None:
                print("Waiting for the new image")
                # if Config.Track.RECOGNIZE_FULL_TRACK:
                #     overtime_track_ids = tracker_manager.find_overtime_current_trackers(
                #         time_last=Config.Track.CURRENT_EXTRACR_TIMER - 5,
                #         find_unrecognized=True)
                #     for overtime_track_id in overtime_track_ids:
                #         checking_tracker, top_predicted_face_ids, matching_result_dict = \
                #             tracker_manager.check_and_recognize_tracker(
                #                 matcher, overtime_track_id,
                #                 mongodb_faceinfo, svm_matcher)
                #         handle_results(checking_tracker, matching_result_dict,
                #                        imageid_to_keyid=imageid_to_keyid,
                #                        dump=False)
                if re_source and not multi_thread:
                    print('Trying to connect the stream again ...')
                    if cam_url is not None:
                        frame_reader = URLFrameReader(cam_url, scale_factor=1)
                    elif queue_reader is not None:
                        frame_reader = RabbitFrameReader(
                            rabbit_mq, queue_reader)
                    else:
                        print('Empty Image Source')
                        return -1
                break

            print("Frame ID: %d" % frame_counter)
            if "_rotate" in cam_url:
                # Rotate clockwise; the angle is encoded in the file name
                # by rotate_video() as "<name>_rotate_<angle>.<ext>"
                rotation = int(cam_url.split("_")[-1].split(".")[0])
                frame = rotate_image_90(frame, rotation)
            if Config.Track.TRACKING_VIDEO_OUT:
                video_out.tmp_video_out(frame)
            if Config.CALC_FPS:
                fps_counter = time.time()

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                # Crop frame
                # frame = frame[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
                origin_bbs, points = detector.detect_face(frame)
                if len(origin_bbs) > 0:
                    origin_bbs = [origin_bb[:4] for origin_bb in origin_bbs]
                    display_and_padded_faces = [
                        CropperUtils.crop_display_face(frame, origin_bb)
                        for origin_bb in origin_bbs
                    ]
                    # cropped_faces = [CropperUtils.crop_face(frame, origin_bb) for origin_bb in origin_bbs]
                    preprocessed_images = [
                        preprocessor.process(
                            CropperUtils.crop_face(frame, origin_bb))
                        for origin_bb in origin_bbs
                    ]
                    embeddings_array, _ = \
                        face_extractor.extract_features_all_at_once(
                            preprocessed_images)
                    for i, origin_bb in enumerate(origin_bbs):
                        display_face, str_padded_bbox = \
                            display_and_padded_faces[i]
                        # cropped_face = CropperUtils.crop_face(frame, origin_bb)

                        # Calculate embedding
                        preprocessed_image = preprocessed_images[i]
                        # preprocessed_image = align_preprocessor.process(frame, points[:, i], aligner, 160)
                        emb_array = np.asarray([embeddings_array[i]])
                        face_info = FaceInfo(
                            # origin_bb.tolist(),
                            # emb_array,
                            frame_counter,
                            origin_bb,
                            points[:, i]
                            # display_face,
                            # str_padded_bbox,
                            # points[:, i].tolist()
                        )
                        is_good_face = handle_filters(points[:, i],
                                                      coeff_extractor,
                                                      face_info,
                                                      preprocessed_image)
                        face_info.is_good = is_good_face
                        # TODO: refactor matching_detected_face_with_trackers
                        matched_track_id = tracker_manager.track(face_info)

                        if not face_info.is_good:
                            print('BAD FACE')
                            continue

                        # Update tracker_manager
                        tracker_manager.update(matched_track_id, frame,
                                               face_info)
                        checking_tracker = None
                        # if not Config.Track.RECOGNIZE_FULL_TRACK:
                        #     checking_tracker, top_predicted_face_ids, matching_result_dict = \
                        #         tracker_manager.check_and_recognize_tracker(
                        #             matcher, matched_track_id,
                        #             mongodb_faceinfo, svm_matcher)
                        #     handle_results(checking_tracker, matching_result_dict,
                        #                    imageid_to_keyid=imageid_to_keyid,
                        #                    dump=True)

                # if Config.Track.RECOGNIZE_FULL_TRACK:
                #     overtime_track_ids = tracker_manager.find_overtime_current_trackers(
                #         time_last=Config.Track.CURRENT_EXTRACR_TIMER - 5,
                #         find_unrecognized=True)
                #     for overtime_track_id in overtime_track_ids:
                #         checking_tracker, top_predicted_face_ids, matching_result_dict = \
                #             tracker_manager.check_and_recognize_tracker(
                #                 matcher, overtime_track_id,
                #                 mongodb_faceinfo, svm_matcher)
                #         handle_results(checking_tracker, matching_result_dict,
                #                        imageid_to_keyid=imageid_to_keyid,
                #                        dump=False)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)
        Config.Track.CURRENT_EXTRACR_TIMER = 0
        trackers_return_dict = tracker_manager.find_and_process_end_track(
            mongodb_faceinfo)
        Config.Track.HISTORY_CHECK_TIMER = 0
        Config.Track.HISTORY_EXTRACT_TIMER = 0
        tracker_manager.long_term_history.check_time(matcher,
                                                     mongodb_faceinfo)
    except KeyboardInterrupt:
        if multi_thread:
            is_on[0] = False
            t.join()
        print('Keyboard Interrupt !!! Release All !!!')
        Config.Track.CURRENT_EXTRACR_TIMER = 0
        trackers_return_dict = tracker_manager.find_and_process_end_track(
            mongodb_faceinfo)
        Config.Track.HISTORY_CHECK_TIMER = 0
        Config.Track.HISTORY_EXTRACT_TIMER = 0
        tracker_manager.long_term_history.check_time(matcher,
                                                     mongodb_faceinfo)
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)
            video_out.release()

from tf_graph import FaceGraph
from cv_utils import show_frame, CropperUtils
from preprocess import Preprocessor
from matcher import KdTreeMatcher
from frame_reader import URLFrameReader
from face_detector import MTCNNDetector  # assumed module path
from face_extractor import FacenetExtractor  # assumed module path
import time

matcher = KdTreeMatcher()
face_graph = FaceGraph()
face_detector = MTCNNDetector(face_graph)
feature_extractor = FacenetExtractor(face_graph)
preprocessor = Preprocessor()
frame_reader = URLFrameReader(cam_url=0, scale_factor=2)
while frame_reader.has_next():
    frame = frame_reader.next_frame()
    bouncing_boxes, landmarks = face_detector.detect_face(frame)
    nrof_faces = len(bouncing_boxes)
    start = time.time()
    for i in range(nrof_faces):
        cropped = CropperUtils.crop_face(frame, bouncing_boxes[i])
        display_face, padded_bb_str = CropperUtils.crop_display_face(
            frame, bouncing_boxes[i])
        reverse_face = CropperUtils.reverse_display_face(
            display_face, padded_bb_str)
        process_img = preprocessor.process(cropped)
        show_frame(reverse_face, 'Reverse')
        show_frame(cropped, 'Cropped')
        emb, coeff = feature_extractor.extract_features(process_img)
        predict_id, top_match_ids = matcher.match(emb)
        print('Predict', predict_id)

def general_process(lock_id, detector, preprocessor, face_extractor,
                    blynk_locker):
    '''
    INPUT: lock_id
    '''
    # Get locker information
    # lock_id = 'query from mongo'
    locker_info = mongodb_lockersinfo.find({'lock_id': lock_id})[0]
    this_locker = mongodb_lockers.find({'lock_id': lock_id})[0]
    cam_url = locker_info['cam_url']
    status = this_locker['status']
    blynk_locker.processing(status)

    # Init face info
    mongodb_faceinfo = mongodb_db[str(lock_id)]

    # Variables for tracking faces
    frame_counter = 0
    start_time = time.time()
    acceptable_spoofing = 0

    # Variables holding the correlation trackers and the name per faceid
    tracking_folder = os.path.join(Config.TRACKING_DIR, str(lock_id))
    create_if_not_exist(tracking_folder)
    tracking_dirs = glob.glob(tracking_folder + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1
    tracker_manager = TrackerManager(
        'LOCKID' + str(lock_id), current_id=number_of_existing_trackers)
    frame_reader = URLFrameReader(cam_url, scale_factor=1)

    matcher = FaissMatcher()
    if status == 'locked':
        embs = []
        labels = []
        cursors = mongodb_faceinfo.find({
            'face_id': this_locker['lock_face_id']
        })
        for cursor in cursors:
            embs.append(np.array(cursor['embedding']))
            labels.append(cursor['image_id'])
        nof_registered_image_ids = len(labels)
        matcher.fit(embs, labels)

    while True:
        # In case the jerk hits the button
        if time.time() - start_time > 4:
            with open('../data/locker_{}_log.txt'.format(lock_id), 'a') as f:
                f.write('[LOCKER {}] OUT OF TIME! \n\n'.format(lock_id))
            frame_reader.release()
            blynk_locker.stop_processing(status)
            return -1

        frame = frame_reader.next_frame()
        if frame is None:
            print('Invalid Video Source')
            break
        fps_counter = time.time()
        # cv2.imshow('Locker {}'.format(lock_id), frame)
        # cv2.waitKey(1)

        tracker_manager.update_trackers(frame)

        if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
            origin_bbs, points = detector.detect_face(frame)
            for i, in_origin_bb in enumerate(origin_bbs):
                origin_bb = in_origin_bb[:-1]
                display_face, str_padded_bbox = \
                    CropperUtils.crop_display_face(frame, origin_bb)
                cropped_face = CropperUtils.crop_face(frame, origin_bb)

                # is_spoofing = spoofing_detector.is_face_spoofing(cropped_face)
                # if is_spoofing:
                #     acceptable_spoofing += 1
                #     with open('../data/spoofing_log.txt', 'a') as f:
                #         f.write('Spoofing Detected at Locker {}: {}\n'.format(
                #             lock_id, is_spoofing))
                # if acceptable_spoofing > 5:
                #     with open('../data/locker_{}_log.txt'.format(lock_id),
                #               'a') as f:
                #         f.write('[LOCKER {}] STOP PROCESSING. '
                #                 'SPOOFING DETECTED!\n'.format(lock_id))
                #     frame_reader.release()
                #     blynk_locker.stop_processing(status)
                #     return -1

                # Calculate embedding
                preprocessed_image = preprocessor.process(cropped_face)
                # preprocessed_image = align_preprocessor.process(frame, points[:, i], aligner, 160)
                emb_array, _ = face_extractor.extract_features(
                    preprocessed_image)
                face_info = FaceInfo(origin_bb.tolist(), emb_array,
                                     frame_counter, display_face,
                                     str_padded_bbox, points[:, i].tolist())
                is_good_face = handle_filters(points[:, i], coeff_extractor,
                                              face_info, preprocessed_image)
                face_info.is_good = is_good_face
                # TODO: refactor matching_detected_face_with_trackers
                matched_track_id = tracker_manager.track(face_info)

                if not face_info.is_good:
                    print('BAD FACE')
                    continue

                # Update tracker_manager
                tracker_manager.update(matched_track_id, frame, face_info)
                checking_tracker = None
                checking_tracker, top_predicted_face_ids, matching_result_dict = \
                    tracker_manager.check_and_recognize_tracker(
                        matcher, matched_track_id, mongodb_faceinfo, None)
                # handle_results(checking_tracker, matching_result_dict)

                if checking_tracker is not None:
                    dumped_images = checking_tracker.dump_images(
                        mongodb_faceinfo,
                        add_new=True,
                        trackingfolder=tracking_folder)
                    checking_tracker.represent_image_id = dumped_images[0]
                    face_url = os.path.join(
                        Config.SEND_RBMQ_HTTP, str(lock_id),
                        str(checking_tracker.track_id),
                        checking_tracker.represent_image_id)
                    face_url += '.jpg'

                    if status == 'available':
                        # Save locker, sign up the face
                        mongodb_lockers.remove({'lock_id': lock_id})
                        msg_dict = {
                            'lock_id': lock_id,
                            'status': 'locked',
                            'lock_face_url': face_url,
                            'lock_face_id': checking_tracker.face_id,
                            'lock_timestamp': time.time(),
                            'unlock_face_url': None,
                            'unlock_face_id': None,
                            'unlock_timestap': None
                        }
                        mongodb_lockers.insert_one(msg_dict)

                        # Update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)

                        with open(
                                '../data/locker_{}_log.txt'.format(lock_id),
                                'a') as f:
                            f.write('[LOCKER {}] REGISTERED FACE AS {}. '
                                    'LOCKED\n'.format(
                                        lock_id, checking_tracker.face_id))
                        blynk_locker.stop_processing('locked')

                    elif status == 'locked':
                        # Release the locker, face verification
                        # Update locker
                        msg_dict = mongodb_lockers.find(
                            {'lock_id': lock_id},
                            projection={"_id": False})[0]
                        msg_dict.update({
                            'unlock_face': face_url,
                            'unlock_timestamp': time.time()
                        })

                        if this_locker['lock_face_id'] == \
                                checking_tracker.face_id:
                            print('UNLOCK!')
                            blynk_locker.stop_processing('available')
                            mongodb_lockers.remove({'lock_id': lock_id})
                            mongodb_lockers.insert_one({
                                'lock_id': lock_id,
                                'status': 'available',
                                'lock_face_id': None,
                                'lock_face_url': None,
                                'lock_timestamp': None,
                                'unlock_face_id': None,
                                'unlock_face_url': None,
                                'unlock_timestap': None
                            })
                            with open(
                                    '../data/locker_{}_log.txt'.format(
                                        lock_id), 'a') as f:
                                f.write(
                                    '[LOCKER {}] MATCHED WITH FACE ID {}. '
                                    'UNLOCKED. THIS LOCKER IS AVAILABLE '
                                    'NOW!\n'.format(
                                        lock_id, checking_tracker.face_id))
                        else:
                            print('NOT MATCH')
                            blynk_locker.stop_processing('locked')
                            with open(
                                    '../data/locker_{}_log.txt'.format(
                                        lock_id), 'a') as f:
                                f.write('[LOCKER {}] NOT MATCH. '
                                        'PLEASE TRY AGAIN!\n'.format(lock_id))

                        # Update logs
                        msg_dict.update({'log_timestamp': time.time()})
                        mongodb_logs.insert_one(msg_dict)

                    frame_reader.release()
                    return 1

        tracker_manager.find_and_process_end_track(mongodb_faceinfo)
        frame_counter += 1
        print("FPS: %f" % (1 / (time.time() - fps_counter)))

def main_function(cam_url, image_url, queue_reader, area):
    '''
    This is main function
    '''
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Variables for tracking faces
    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    list_of_trackers = TrackersList()

    clear_tracking_folder()

    # Model for human detection
    print('Load YOLO model ...')
    options = {
        "model": "./cfg/yolo.cfg",
        "load": "../models/yolo.weights",
        "threshold": 0.5
    }
    detector = TFNet(options)

    # Model for person re-id
    body_extractor = BodyExtractor()

    if image_url is not None:
        imgcv = cv2.imread(image_url)
        results = detector.return_predict(imgcv)
        print(results)
        imgcv = draw_results(imgcv, results)
        print('Result drawn as ../test-data/result.jpg')
        cv2.imwrite('../test-data/result.jpg', imgcv)

    track_results = TrackerResultsDict()
    predict_dict = {}
    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1)
    elif queue_reader is not None:
        frame_reader = RabbitFrameReader(rabbit_mq, queue_reader)
    else:
        print('Empty Image Source')
        return -1

    video_out_fps, video_out_w, video_out_h = frame_reader.get_info()
    print(video_out_fps, video_out_w, video_out_h)
    center = (int(video_out_w / 2), int(video_out_h / 2))
    bbox = [
        int(center[0] - Config.Frame.ROI_CROP[0] * video_out_w),
        int(center[1] - video_out_h * Config.Frame.ROI_CROP[1]),
        int(center[0] + Config.Frame.ROI_CROP[2] * video_out_w),
        int(center[1] + Config.Frame.ROI_CROP[3] * video_out_h)
    ]

    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        # new_w = abs(bbox[0] - bbox[2])
        # new_h = abs(bbox[1] - bbox[3])
        # print(new_w, new_h)
        video_out = VideoHandle('../data/tracking_video_out.avi',
                                video_out_fps, int(video_out_w),
                                int(video_out_h))
    dlib_c_tr_video = cv2.VideoWriter('../data/check_tracking_video.avi',
                                      cv2.VideoWriter_fourcc(*'XVID'),
                                      video_out_fps,
                                      (int(video_out_w), int(video_out_h)))

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                trackers_return_dict, predict_trackers_dict = \
                    list_of_trackers.check_delete_trackers(None, rabbit_mq)
                track_results.update_two_dict(trackers_return_dict)
                predict_dict.update(predict_trackers_dict)
                # list_of_trackers.trackers_history.check_time(None)
                # if cam_url is not None:
                #     frame_reader = URLFrameReader(cam_url, scale_factor=1)
                # elif queue_reader is not None:
                #     frame_reader = RabbitFrameReader(rabbit_mq, queue_reader)
                # else:
                #     print('Empty Image Source')
                #     return -1
                continue

            print("Frame ID: %d" % frame_counter)
            # frame = frame[bbox[1]:bbox[3], bbox[0]:bbox[2], :]

            if Config.Track.TRACKING_VIDEO_OUT:
                video_out.tmp_video_out(frame)
            if Config.CALC_FPS:
                fps_counter = time.time()

            tmpi = list_of_trackers.update_dlib_trackers(frame)
            dlib_c_tr_video.write(tmpi)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                start_time = time.time()
                detected_bbs = []
                results = detector.return_predict(frame)
                print('Detect time: {}'.format(1 / (time.time() - start_time)))
                for result in results:
                    if result['label'] != 'person':
                        continue
                    x1 = result['topleft']['x']
                    y1 = result['topleft']['y']
                    x2 = result['bottomright']['x']
                    y2 = result['bottomright']['y']
                    origin_bb = [x1, y1, x2, y2]
                    detected_bbs.append(origin_bb)

                    print(is_inner_bb(bbox, origin_bb))
                    if not is_inner_bb(bbox, origin_bb):
                        continue
                    if calc_bb_size(origin_bb) < 10000:
                        continue
                    bb_size = calc_bb_size(origin_bb)
                    body_image = frame[origin_bb[1]:origin_bb[3],
                                       origin_bb[0]:origin_bb[2], :]
                    # body_emb = body_extractor.extract_feature(body_image)

                    # TODO: refactor matching_detected_face_with_trackers
                    matched_fid = list_of_trackers.matching_face_with_trackers(
                        frame, frame_counter, origin_bb, None, body_image,
                        body_extractor)

                    # Update list_of_trackers
                    list_of_trackers.update_trackers_list(
                        matched_fid, time.time(), origin_bb, body_image,
                        bb_size, area, frame_counter, None, body_extractor,
                        rabbit_mq)

                # list_of_trackers.update_trackers_by_tracking(
                #     body_extractor, frame, area, frame_counter,
                #     detected_bbs, rabbit_mq)

                trackers_return_dict, predict_trackers_dict = \
                    list_of_trackers.check_delete_trackers(None, rabbit_mq)
                track_results.update_two_dict(trackers_return_dict)
                predict_dict.update(predict_trackers_dict)

                # Check extract trackers history time
                # (str(frame_counter) + '_' + str(i))
                # list_of_trackers.trackers_history.check_time(None)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))

        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)
        if Config.Track.PREDICT_DICT_OUT:
            PickleUtils.save_pickle(Config.PREDICTION_DICT_FILE, predict_dict)
    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        # list_of_trackers.trackers_history.check_time(matcher)
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        if Config.Track.TRACKING_VIDEO_OUT:
            print('Write track video')
            video_out.write_track_video(track_results.tracker_results_dict)
            video_out.release()
        if Config.Track.PREDICT_DICT_OUT:
            PickleUtils.save_pickle(Config.PREDICTION_DICT_FILE, predict_dict)

def cam_worker_function(cam_url, area):
    '''
    Cam worker function
    '''
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # Modify Config
    Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL = True

    rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                         (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))

    frame_counter = 0

    # Variables holding the correlation trackers and the name per faceid
    list_of_trackers = TrackersList()

    face_rec_graph = FaceGraph()
    face_extractor = FacenetExtractor(face_rec_graph)
    detector = MTCNNDetector(face_rec_graph)
    preprocessor = Preprocessor()
    matcher = KdTreeMatcher()
    if Config.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url, scale_factor=1.5)
    else:
        frame_reader = RabbitFrameReader(rabbit_mq)

    try:
        while True:  # frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                print("Waiting for the new image")
                list_of_trackers.check_delete_trackers(
                    matcher, rabbit_mq, history_mode=False)
                continue

            print("Frame ID: %d" % frame_counter)
            if Config.CALC_FPS:
                fps_counter = time.time()

            list_of_trackers.update_dlib_trackers(frame)

            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                origin_bbs, points = detector.detect_face(frame)
                for i, origin_bb in enumerate(origin_bbs):
                    display_face, _ = CropperUtils.crop_display_face(
                        frame, origin_bb)
                    print("Display face shape")
                    print(display_face.shape)
                    if 0 in display_face.shape:
                        continue
                    cropped_face = CropperUtils.crop_face(frame, origin_bb)

                    # Calculate embedding
                    preprocessed_image = preprocessor.process(cropped_face)
                    emb_array, coeff = face_extractor.extract_features(
                        preprocessed_image)

                    # Calculate angle
                    angle = FaceAngleUtils.calc_angle(points[:, i])

                    # TODO: refactor matching_detected_face_with_trackers
                    matched_fid = list_of_trackers.matching_face_with_trackers(
                        frame, origin_bb, emb_array)

                    # Update list_of_trackers
                    list_of_trackers.update_trackers_list(
                        matched_fid, origin_bb, display_face, emb_array,
                        angle, area, frame_counter, matcher, rabbit_mq)

                    if Config.Track.TRACKING_QUEUE_CAM_TO_CENTRAL:
                        track_tuple = (matched_fid, display_face, emb_array,
                                       area, time.time(), origin_bb, angle)
                        rabbit_mq.send_tracking(
                            track_tuple,
                            rabbit_mq.RECEIVE_CAM_WORKER_TRACKING_QUEUE)

            # Check delete current trackers time
            list_of_trackers.check_delete_trackers(
                matcher, rabbit_mq, history_mode=False)

            frame_counter += 1
            if Config.CALC_FPS:
                print("FPS: %f" % (1 / (time.time() - fps_counter)))
    except KeyboardInterrupt:
        print('Keyboard Interrupt !!! Release All !!!')
        if Config.CALC_FPS:
            print('Time elapsed: {}'.format(time.time() - start_time))
            print('Avg FPS: {}'.format(
                (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()

def generic_function(cam_url, queue_reader, area, face_extractor_model,
                     re_source):
    '''
    This is main function
    '''
    print("Generic function")
    print("Cam URL: {}".format(cam_url))
    print("Area: {}".format(area))

    # TODO: init logger, modulize this?

    # Variables for tracking faces
    frame_counter = 0

    if Config.Matcher.CLEAR_SESSION:
        clear_session_folder()
    if Config.Mode.CALC_FPS:
        start_time = time.time()
    if cam_url is not None:
        frame_reader = URLFrameReader(cam_url)
    else:
        print('Empty Image Source')
        return -1
    video_out_fps, video_out_w, video_out_h = frame_reader.get_info()
    print(video_out_fps, video_out_w, video_out_h)

    video_out = None
    if Config.Track.TRACKING_VIDEO_OUT:
        video_out = VideoHandle(time.time(), video_out_fps, int(video_out_w),
                                int(video_out_h))

    db = DashboardDatabase(use_image_id=True)
    rabbit_mq = RabbitMQ((Config.Rabbit.USERNAME, Config.Rabbit.PASSWORD),
                         (Config.Rabbit.IP_ADDRESS, Config.Rabbit.PORT))
    matcher = KdTreeMatcher()
    matcher.build(db)

    # Find current track
    import glob
    tracking_dirs = glob.glob(Config.Dir.TRACKING_DIR + '/*')
    if tracking_dirs == []:
        number_of_existing_trackers = 0
    else:
        lof_int_trackid = [
            int(tracking_dir.split('/')[-1]) for tracking_dir in tracking_dirs
        ]
        number_of_existing_trackers = max(lof_int_trackid) + 1

    mode = 'video'  # video, live
    '''
    # Feature 1: Find Merge Split
    splitMerge = pipe.SplitMergeThread(
        database=db, rabbit_mq=rabbit_mq, matcher=matcher)
    splitMerge.daemon = True
    splitMerge.start()

    # Feature 2: Find similar
    findSimilarFaceThread = pipe.FindSimilarFaceThread(
        database=db, rabbit_mq=rabbit_mq)
    findSimilarFaceThread.daemon = True
    findSimilarFaceThread.start()
    '''

    # Main program stages
    stageDetectFace = pipe.Stage(pipe.FaceDetectWorker, 1)
    stagePreprocess = pipe.Stage(pipe.PreprocessDetectedFaceWorker, 1)
    stageDistributor = pipe.Stage(pipe.FaceDistributorWorker, 1)
    stageExtract = pipe.Stage(pipe.FaceExtractWorker, 1)
    stageTrack = pipe.Stage(
        pipe.FullTrackTrackingWorker,
        1,
        area=area,
        database=db,
        matcher=matcher,
        init_tracker_id=number_of_existing_trackers)
    stageResultToTCH = pipe.Stage(
        pipe.SendToDashboardWorker, 1, database=db, rabbit_mq=rabbit_mq)
    stageStorage = pipe.Stage(pipe.DashboardStorageWorker, 1)
    stageDatabase = pipe.Stage(pipe.DashboardDatabaseWorker, 1, database=db)

    stageDetectFace.link(stagePreprocess)
    stagePreprocess.link(stageDistributor)
    stageDistributor.link(stageExtract)
    stageExtract.link(stageTrack)
    stageTrack.link(stageResultToTCH)
    stageTrack.link(stageStorage)
    stageTrack.link(stageDatabase)

    if Config.Track.TRACKING_VIDEO_OUT:
        stageVideoOut = pipe.Stage(
            pipe.VideoWriterWorker, 1, database=db, video_out=video_out)
        stageTrack.link(stageVideoOut)

    pipeline = pipe.Pipeline(stageDetectFace)
    print('Begin')
    try:
        while frame_reader.has_next():
            frame = frame_reader.next_frame()
            if frame is None:
                if mode == 'video':
                    print("Wait for executor to finish its jobs")
                    pipeline.put(None)
                    break
                if mode == 'live':
                    if re_source:
                        print('Trying to connect the stream again ...')
                        if cam_url is not None:
                            frame_reader = URLFrameReader(
                                cam_url, scale_factor=1, should_crop=True)
                    continue

            print('Read frame', frame_counter, frame.shape)
            if frame_counter % Config.Frame.FRAME_INTERVAL == 0:
                # timer = Timer(frame_counter)
                task = pipe.Task(pipe.Task.Frame)
                task.package(frame=frame, frame_info=frame_counter)
                pipeline.put(task)
                # pipeline.put((frame, frame_counter, timer))
            frame_counter += 1

        print('Time elapsed: {}'.format(time.time() - start_time))
        print('Avg FPS: {}'.format(
            (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()
        '''
        splitMerge.join()
        findSimilarFaceThread.join()
        '''
    except KeyboardInterrupt:
        if Config.Track.TRACKING_VIDEO_OUT:
            video_out.release_tmp()
        pipeline.put(None)
        print('Keyboard Interrupt !!! Release All !!!')
        print('Time elapsed: {}'.format(time.time() - start_time))
        print('Avg FPS: {}'.format(
            (frame_counter + 1) / (time.time() - start_time)))
        frame_reader.release()

def test_scale_factor(self):
    frame_reader = URLFrameReader(
        cam_url='%s//data/video/test-vin.mp4' % ROOT, scale_factor=2)
    frame = frame_reader.next_frame()
    self.assertEqual(frame.shape, (540, 960, 3))
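
def test_no_scale(self):
    # Hedged companion sketch: assumes test-vin.mp4 is natively 1080x1920,
    # inferred from test_scale_factor above (scale_factor=2 halves each
    # dimension to 540x960); the native size is not confirmed elsewhere.
    frame_reader = URLFrameReader(
        cam_url='%s//data/video/test-vin.mp4' % ROOT, scale_factor=1)
    frame = frame_reader.next_frame()
    self.assertEqual(frame.shape, (1080, 1920, 3))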