def visualize_tracks(tracks, image_root, seq, args):
    """Draw face/body boxes for every frame of a track array and encode a video.

    tracks: ndarray rows [frame, identity, face(x1,y1,x2,y2), body(x1,y1,x2,y2)].
    image_root: dataset root containing images/<seq>/ and results/.
    seq: sequence name; args.detector selects the results subfolder.
    """
    frames = np.unique(tracks[:, 0])
    start_frame = min(frames)
    save_dir = osp.join(image_root, "results", "tracking_noid", args.detector, seq)
    # Bug fix: the output directory may not exist; create it before writing.
    os.makedirs(save_dir, exist_ok=True)
    # Bug fix: the original iterated range(1, max(frames)), which dropped the
    # last frame and ignored start_frame even though ffmpeg below is told to
    # start numbering at start_frame.
    for f in range(int(start_frame), int(max(frames)) + 1):
        data = tracks[tracks[:, 0] == int(f)]
        image_path = osp.join(image_root, "images", seq, "{:06d}.jpg".format(int(f)))
        frame = cv2.imread(image_path)
        if frame is None:
            # Missing/corrupt image: skip instead of crashing in draw_box_name.
            continue
        for box_data in data:
            face_box = box_data[2:6].astype(int)
            body_box = box_data[6:].astype(int)
            # Identities are intentionally not rendered ("noid" output);
            # the unused identity column is left untouched.
            frame = draw_box_name(face_box, "", frame)
            frame = draw_box_name(body_box, "", frame)
        save_path = osp.join(save_dir, "{:06d}_smoothed.jpg".format(int(f)))
        cv2.imwrite(save_path, frame)
    os.system(
        "ffmpeg -framerate 20 -start_number {} -i {}/%06d_smoothed.jpg {}/{}_smoothed.mp4"
        .format(start_frame, save_dir, save_dir, seq))
def main(self):
    """Grab frames until one is read successfully, annotate recognised faces,
    and return the frame JPEG-encoded as bytes."""
    while cap.isOpened():
        isSuccess, frame = cap.read()
        if isSuccess:
            try:
                # MTCNN expects a PIL image (colour order left as-is upstream).
                image = Image.fromarray(frame)
                bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
                bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
                results, score = learner.infer(conf, faces, targets, args.tta)
                for idx, bbox in enumerate(bboxes):
                    if args.score:
                        frame = draw_box_name(
                            bbox,
                            names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                            frame)
                    else:
                        if float('{:.2f}'.format(score[idx])) > .98:
                            # distance above threshold -> treat as unknown
                            name = names[0]
                        else:
                            name = names[results[idx] + 1]
                        # Bug fix: the original computed `name` but then drew
                        # names[results[idx] + 1], ignoring the unknown branch.
                        frame = draw_box_name(bbox, name, frame)
            except Exception:
                # Best effort: on detection failure return the raw frame.
                pass
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tostring()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def inference(self, conf, img):
    """Recognise the first face found in the image file `img`.

    Returns (name, annotated_frame). When detection fails, `name` is the
    error string and a marker box is drawn at the image centre.
    """
    mtcnn = MTCNN()
    learner = face_learner(conf, True)
    learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    targets, names = load_facebank(conf)
    image = Image.open(img)
    frame = cv2.imread(img, cv2.IMREAD_COLOR)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    try:
        bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
        bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
        bboxes = bboxes.astype(int)
        bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
        results, score = learner.infer(conf, faces, targets, False)
        name = names[results[0] + 1]
        frame = draw_box_name(bboxes[0], name, frame)
    except Exception as ex:
        name = "Can't detect face."
        h, w, c = frame.shape
        # Bug fix: boxes are [x1, y1, x2, y2]; x must come from the width and
        # y from the height — the original swapped them.
        bbox = [int(w * 0.5), int(h * 0.5), int(w * 0.5), int(h * 0.5)]
        frame = draw_box_name(bbox, name, frame)
    return name, frame
def main():
    """Grab one frame, recognise faces, and return
    (annotated_jpeg_bytes, registered_face_jpeg_bytes, name, match_score)."""
    while cap.isOpened():
        isSuccess, frame = cap.read()
        match_score = None
        name = None
        det_image = None
        if isSuccess:
            try:
                image = Image.fromarray(frame)
                bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
                bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
                results, score = learner.infer(conf, faces, targets, args.tta)
                match_score = "{:.2f}".format(score.data[0] * 100)
                for idx, bbox in enumerate(bboxes):
                    if args.score:
                        frame = draw_box_name(
                            bbox,
                            names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                            frame)
                    else:
                        if float('{:.2f}'.format(score[idx])) > .98:
                            # distance above threshold -> unknown face
                            match_score = None
                            frame = draw_box_name(bbox, "unknown", frame)
                        else:
                            name = names[results[idx] + 1]
                            frame = draw_box_name(bbox, names[results[idx] + 1], frame)
                            # Bug fix: the original concatenated "facebank" and
                            # the name with no path separator, so the glob
                            # never matched the user's folder.
                            path = ("/home/circle/Downloads/work-care-master/"
                                    "engine/dl/data/facebank/" + str(name) + "/*.jpg")
                            filenames = [img for img in glob.glob(path)]
                            if filenames:  # guard: folder may be empty
                                img = cv2.imread(filenames[0])
                                det_image = cv2.imencode('.jpg', img)[1].tostring()
            except Exception:
                pass
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tostring(), det_image, name, match_score
def fn_face_verify_module():
    """Load the detector, recogniser and facebank, then verify the faces on a
    single captured frame, displaying (and optionally recording) the result."""
    mtcnn = MTCNN()
    print('mtcnn loaded')
    learner = face_learner(conf, True)
    learner.threshold = args.threshold
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    print('learner loaded')
    if args.update:
        targets, names = prepare_facebank(conf, learner.model, mtcnn, tta=args.tta)
        print('facebank updated')
    else:
        targets, names = load_facebank(conf)
        print('facebank loaded')
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
            results, score = learner.infer(conf, faces, targets, args.tta)
            for idx, bbox in enumerate(bboxes):
                if args.score:
                    frame = draw_box_name(
                        bbox,
                        names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                        frame)
                else:
                    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
        except Exception:
            # Bug fix: bare except also swallowed SystemExit/KeyboardInterrupt.
            print('detect error')
        cv2.imshow('face Capture', frame)
        if args.save:
            video_writer.write(frame)
def authenticuser(path, userid):
    """Verify that the face in the image at `path` belongs to `userid`.

    Returns a result dict describing the verification outcome.
    """
    conf = get_config(False)
    mtcnn = MTCNN()
    print('mtcnn loaded')
    learner = face_learner(conf, True)
    learner.threshold = 1.35
    learner.load_state(conf, 'cpu_final.pth', True, True)
    learner.model.eval()
    print('learner loaded')
    targets = load_facebank_user(conf, userid)
    names = ['Unknown', userid]
    print('facebank loaded')
    # Bug fix: the original looped `while True` re-reading the same still
    # image, so `result` could be unbound and the loop never terminated when
    # no face was detected. A static image needs exactly one pass.
    result = {"_result": "failure",
              "User Verified with": {"confidence": None,
                                     "userid": None,
                                     "error": "No face detected"}}
    frame = cv2.imread(path)
    image = Image.fromarray(frame)
    bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
    bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
    bboxes = bboxes.astype(int)
    bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
    results, score = learner.infer(conf, faces, targets)
    for idx, bbox in enumerate(bboxes):
        frame = draw_box_name(
            bbox,
            names[results[idx] + 1] + '_{:.2f}'.format(100 - score[idx]),
            frame)
        result = {"_result": "success",
                  "User Verified with": {
                      "confidence": '{:.2f}%'.format(100 - score[idx]),
                      "userid": names[results[idx] + 1],
                      "error": "Success"}}
        # accuracy/user are module-level accumulators defined outside this block.
        accuracy.append('{:.2f}'.format(100 - score[idx]))
        user.append(names[results[idx] + 1])
        print(names[results[idx] + 1], '{:.2f}'.format(100 - score[idx]))
    return result
def infer_general_image(self, image, plot_result=True, tta=False):
    """Detect, align and recognise every face in a cv2-format image.

    Returns None when no facebank is loaded; an annotated PIL image when
    plot_result is True; otherwise (faces, predicted_names).
    """
    target_embs = self.targets
    names = self.names
    if target_embs is None or names is None:
        print("No facebank Detected ==> CANT infering!")
        return None
    origin_image = image
    faces, boxes = self.detect_model.detect_face(image)
    list_imgs_to_recognise = []
    for face in faces:
        # Align each crop, then convert PIL RGB -> BGR for the recogniser
        # (IMPORTANT: the recogniser expects BGR).
        img, _ = self.alignment_model.align(face)
        img = convert_pil_rgb2bgr(img)
        list_imgs_to_recognise.append(img)
    predicted_names, predicted_distances = self.infer(
        list_imgs_to_recognise, tta=tta)
    if plot_result:
        boxes = boxes.astype(int)
        boxes = boxes + [-1, -1, 1, 1]  # slightly enlarge each box
        for idx, box in enumerate(boxes):
            image = draw_box_name(box, predicted_names[idx], origin_image)
        return Image.fromarray(image)
    # Bug fix: the original returned the facebank `names` list instead of the
    # per-face predictions promised by the header comment.
    return faces, predicted_names
def fn_face_verify():
    """Verify faces on one captured frame and report each recognised name to
    the server's /learn endpoint."""
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
            results, score = learner.infer(conf, faces, targets, args.tta)
            for idx, bbox in enumerate(bboxes):
                if args.score:
                    # Bug fix: `frame` was accidentally passed inside
                    # .format(score[idx], frame) instead of as the third
                    # argument of draw_box_name.
                    frame = draw_box_name(
                        bbox,
                        names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                        frame)
                    URL = server + "learn"
                    json_feed = {'name': names[results[idx] + 1]}
                    response = requests.post(URL, json=json_feed)
                    print("----------------")
                    print(names[results[idx] + 1])
                else:
                    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
                    URL = server + "learn"
                    json_feed = {'name': names[results[idx] + 1]}
                    response = requests.post(URL, json=json_feed)
                    print(">>>>>>>>>>>>>>>")
                    print(names[results[idx] + 1])
        except Exception:
            # Bug fix: bare except also swallowed SystemExit/KeyboardInterrupt.
            print('detect error')
        cv2.imshow('face Capture', frame)
        if args.save:
            video_writer.write(frame)
def main(self):
    """Grab frames until one is read successfully, annotate recognised faces
    (or "unknown" above the distance threshold), and return JPEG bytes."""
    while cap.isOpened():
        isSuccess, frame = cap.read()
        if isSuccess:
            try:
                image = Image.fromarray(frame)
                bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
                bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
                bboxes = bboxes.astype(int)
                bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
                results, score = learner.infer(conf, faces, targets, args.tta)
                print(score)
                for idx, bbox in enumerate(bboxes):
                    if args.score:
                        frame = draw_box_name(
                            bbox,
                            names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                            frame)
                    else:
                        if float('{:.2f}'.format(score[idx])) > 1:
                            # distance above threshold -> unknown face
                            name = names[0]
                            print(name)
                            frame = draw_box_name(bbox, "unknown", frame)
                        else:
                            name = names[results[idx] + 1]
                            print(name, "extra")
                            frame = draw_box_name(bbox, names[results[idx] + 1], frame)
            except Exception:
                # Bug fix: bare except also swallowed SystemExit/KeyboardInterrupt.
                pass
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tostring()
def get_pic():
    """Capture one frame: blur faces the server flags as unknown, label known
    ones, and register a new face with the server when 't' is pressed."""
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
            face_list = []
            for idx, bbox in enumerate(bboxes):
                face_list.append(np.array(faces[idx]).tolist())
            URL = server + "register_check"
            json_feed_verify = {'face_list': face_list}
            start_time = datetime.now()
            response = requests.post(URL, json=json_feed_verify)
            finish_time = datetime.now()
            print('register check time:', finish_time - start_time)
            print(response)
            check_list = response.json()["check_list"]
            for idx, bbox in enumerate(bboxes):
                if check_list[idx] == 'unknown':
                    # Privacy: blur faces the server does not recognise.
                    frame[bbox[1]:bbox[3], bbox[0]:bbox[2]] = cv2.blur(
                        frame[bbox[1]:bbox[3], bbox[0]:bbox[2]], (23, 23))
                else:
                    frame = draw_box_name(bbox, "known", frame)
            cv2.imshow("My Capture", frame)
        except Exception:
            print("detect error")
        if cv2.waitKey(1) & 0xFF == ord('t'):
            p = Image.fromarray(frame[..., ::-1])  # BGR -> RGB for MTCNN
            try:
                # Cleanup: removed the unused warped_face / tolist_img /
                # register-timing locals from the original.
                re_img = mtcnn.align(p)
                tolist_face = np.array(re_img).tolist()
                URL = server + "register"
                json_feed = {'face_image': tolist_face}
                response = requests.post(URL, json=json_feed)
            except Exception:
                print('no face captured')
def get_pic():
    """Send the captured frame to the server, blur faces the server reports as
    unknown, and register a new face with the server when 't' is pressed."""
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            frame_to_server = np.array(image).tolist()
            print(np.array(image))
            URL = server + "getframe"
            json_feed_frame = {'frame_to_server': frame_to_server}
            response = requests.post(URL, json=json_feed_frame)
            # from here
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, : -1]  # shape:[10,4],only keep 10 highest possibiity faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            face_list = []
            for idx, bbox in enumerate(bboxes):
                face_list.append(np.array(faces[idx]).tolist())
            # start=time.time()
            URL = server + "register_check"
            json_feed_verify = {'face_list': face_list}  # payload sent to the server
            response = requests.post(URL, json=json_feed_verify)  # server's reply
            print(response)
            check_list = response.json()["check_list"]
            # up to here should eventually be handled by the server's getframe side
            for idx, bbox in enumerate(bboxes):
                if check_list[idx] == 'unknown':
                    # blur faces the server does not recognise
                    frame[bbox[1]:bbox[3], bbox[0]:bbox[2]] = cv2.blur(
                        frame[bbox[1]:bbox[3], bbox[0]:bbox[2]], (23, 23))
                else:
                    frame = draw_box_name(bbox, "known", frame)
            cv2.imshow("My Capture", frame)
        except:
            print("detect error")
        # part that hands the captured photo to the server
        if cv2.waitKey(1) & 0xFF == ord('t'):
            p = Image.fromarray(frame[..., ::-1])
            try:
                re_img = mtcnn.align(p)
                tolist_face = np.array(re_img).tolist()
                URL = server + "register"
                json_feed = {'face_image': tolist_face}
                response = requests.post(URL, json=json_feed)
            except:
                print('no face captured')
def ddeep():
    """Capture a frame, blur unknown faces, and drive server-side register /
    confirm / rename / update / delete actions from keyboard input.

    NOTE(review): the 'c'/'n'/'u'/'d' branches call cv2.waitKey(0), which
    blocks until a key is pressed — confirm this is the intended UX.
    """
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # drop detector confidence column
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
            face_list = []
            for idx, bbox in enumerate(bboxes):
                face_list.append(np.array(faces[idx]).tolist())
            URL = server + "register_check"
            json_feed = {'face_list': face_list}
            response = requests.post(URL, json=json_feed)
            check_list = response.json()["check_list"]
            for idx, bbox in enumerate(bboxes):
                if check_list[idx] == 'unknown':
                    # blur faces the server does not recognise
                    frame[bbox[1]:bbox[3], bbox[0]:bbox[2]] = cv2.blur(
                        frame[bbox[1]:bbox[3], bbox[0]:bbox[2]], (23, 23))
                else:
                    frame = draw_box_name(bbox, "known", frame)
            cv2.imshow('DDeeP', frame)
        except:
            print("Sorry ")
        # press 'r' to register the current face with the server
        if cv2.waitKey(1) & 0xFF == ord('r'):
            p = Image.fromarray(frame[..., ::-1])
            try:
                register_face = np.array(mtcnn.align(p))[..., ::-1]
                name = 'A'
                URL = server + "register"
                tolist_face = register_face.tolist()
                json_feed = {
                    'register_image': tolist_face,
                    'register_name': name
                }
                response = requests.post(URL, json=json_feed)
                print(response)
            except:
                print('no face captured')
        # press 'c' to confirm (read back the stored feature)
        if cv2.waitKey(0) & 0xFF == ord('c'):
            URL = server + "ReadFeature"
            params = {'name': 'A'}
            res = requests.get(URL, params=params)
            res = res.json()
            res = res['result']
            print(res)
        # press 'n' to update the registered name
        if cv2.waitKey(0) & 0xFF == ord('n'):
            URL = server + 'update'
            params = {'old_name': 'A', 'new_name': 'NEW'}
            res = requests.get(URL, params=params)
            print(res.text)
        # press 'u' to update the registered face image
        if cv2.waitKey(0) & 0xFF == ord('u'):
            newpic = Image.fromarray(frame[..., ::-1])
            new_img = np.array(newpic).tolist()
            URL = server + 'update'
            json_feed = {'name': 'NEW', 'new_image': new_img}
            res = requests.post(URL, json=json_feed)
        # press 'd' to delete the registration
        if cv2.waitKey(0) & 0xFF == ord('d'):
            URL = server + 'delete'
            params = {'name': 'NEW'}
            res = requests.delete(URL, params=params)
            print(res.text)
# Fragment: match body boxes to face detections and propagate identities
# between frames (the enclosing function is not visible in this chunk).
# Allocate the next sequential two-digit identity name.
names.append("{:02d}".format(int(names[-1]) + 1))
for j, bodybbox in enumerate(body_bboxes):
    # NOTE(review): merged_box is computed before max_inds[j] != -1 is
    # checked, so bboxes[-1] (the last detection) is silently used when
    # there is no match — confirm this is intended.
    merged_box = merge_box(bodybbox, bboxes[max_inds[j]])
    if max_inds[j] != -1:
        # match for the previous frame using IOU
        iou = np_vec_no_jit_iou(
            np.array([bboxes[max_inds[j]]]), bboxes0)
        max_iou, max_ind = np.max(iou, axis=1), np.argmax(iou, axis=1)
        print(iou, max_ind)
        if max_iou >= 0.5:
            # strong overlap with the previous frame: keep its identity
            frame = draw_box_name(merged_box, names0[max_ind[0]], frame)
            frame = draw_box_name(bboxes[max_inds[j]], names0[max_ind[0]], frame)
            curr_names.append(names0[max_ind[0]])
        else:
            # otherwise fall back to the facebank recognition result
            frame = draw_box_name(
                merged_box, names[results[max_inds[j]] + 1], frame)
            frame = draw_box_name(
                bboxes[max_inds[j]], names[results[max_inds[j]] + 1], frame)
            curr_names.append(names[results[max_inds[j]] + 1])
        curr_boxes.append(bboxes[max_inds[j]])
if len(curr_names) != 0:
    # carry the identities forward for the next frame's IOU matching
    names0 = curr_names
# Fragment: recognise aligned faces on one frame and write the annotated
# frame to the output video (the surrounding loop/setup is outside this chunk).
img_size = 112
margin = 0
img_h, img_w, _ = frame.shape
start_time = time.time()
results, score = infer(model=model, conf=conf, faces=aligned_faces,
                       target_embs=targets, tta=False)
print('Duration: {}'.format(time.time() - start_time))
# results, score = infer(model=model, conf=conf, faces=aligned_faces, target_embs=targets, tta=True)
for idx, bbox in enumerate(bboxes):
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    # clamp the (optionally margin-expanded) box to the image bounds
    xw1 = max(int(x1 - margin), 0)
    yw1 = max(int(y1 - margin), 0)
    xw2 = min(int(x2 + margin), img_w - 1)
    yw2 = min(int(y2 + margin), img_h - 1)
    bbox = [xw1, yw1, xw2, yw2]
    # frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
# frame = cv2.resize(frame, dsize=None ,fx=0.25, fy=0.25)
video_writer.write(frame)
# cv2.imshow('window', frame)
# if cv2.waitKey(0) == ord('q'):
#     break
cap.release()
video_writer.release()
# Fragment: batch-verify labelled images from a folder and tally per-name
# hit/miss counts (`counts` / `verify_fold_dir` are defined outside this chunk).
for fil in path.iterdir():
    # if not fil.is_file():
    #     continue
    # else:
    print(fil)
    frame = cv2.imread(str(fil))
    image = Image.fromarray(frame)
    bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
    bboxes = bboxes[:, : -1]  # shape:[10,4],only keep 10 highest possibiity faces
    bboxes = bboxes.astype(int)
    bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
    results, score = learner.infer(conf, faces, targets, args.tta)
    for idx, bbox in enumerate(bboxes):
        pred_name = names[results[idx] + 1]
        frame = draw_box_name(
            bbox, pred_name + '_{:.2f}'.format(score[idx]), frame)
        if pred_name in fil.name:
            # prediction matches the label embedded in the filename
            counts[pred_name][1] += 1
        else:
            # strip digits from the file stem to recover the true label
            orig_name = ''.join([
                i for i in fil.name.split('.')[0] if not i.isdigit()
            ])
            counts[orig_name][0] += 1
    # new_name = '_'.join(str(fil).split('/')[-2:])
    # print(verify_dir/fil.name)
    cv2.imwrite(str(verify_fold_dir / fil.name), frame)
print(counts)
def main(_argv):
    """Run ArcFace (TensorFlow) webcam/video recognition: load a checkpoint
    and facebank, then annotate recognised faces frame by frame until 'q'."""
    # Silence TensorFlow logging and pin the GPU before any TF work.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()
    cfg = load_yaml(FLAGS.cfg_path)
    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)
    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()
    if FLAGS.update:
        print('Face bank updating...')
        targets, names = prepare_facebank(cfg, model)
        print('Face bank updated')
    else:
        targets, names = load_facebank(cfg)
        print('Face bank loaded')
    # Webcam when no video path is given.
    if FLAGS.video is None:
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(str(FLAGS.video))
    if FLAGS.save:
        video_writer = cv2.VideoWriter('./recording.avi',
                                       cv2.VideoWriter_fourcc(*'XVID'), 10,
                                       (640, 480))
        # frame rate 6 due to my laptop is quite slow...
    while cap.isOpened():
        is_success, frame = cap.read()
        if is_success:
            img = frame
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            bboxes, landmarks, faces = align_multi(
                cfg, img, min_confidence=FLAGS.min_confidence, limits=3)
            bboxes = bboxes.astype(int)
            embs = []
            for face in faces:
                # model expects a batched float image scaled to [0, 1]
                if len(face.shape) == 3:
                    face = np.expand_dims(face, 0)
                face = face.astype(np.float32) / 255.
                embs.append(l2_norm(model(face)).numpy())
            list_min_idx = []
            list_score = []
            for emb in embs:
                # nearest facebank embedding by euclidean distance
                dist = [euclidean(emb, target) for target in targets]
                min_idx = np.argmin(dist)
                list_min_idx.append(min_idx)
                list_score.append(dist[int(min_idx)])
            list_min_idx = np.array(list_min_idx)
            list_score = np.array(list_score)
            # distances above the threshold are re-labelled -1 ("unknown")
            list_min_idx[list_score > FLAGS.threshold] = -1
            for idx, box in enumerate(bboxes):
                frame = utils.draw_box_name(box, landmarks[idx],
                                            names[list_min_idx[idx] + 1],
                                            frame)
            frame = cv2.resize(frame, (640, 480))
            cv2.imshow('face Capture', frame)
            key = cv2.waitKey(1) & 0xFF
            if FLAGS.save:
                video_writer.write(frame)
            if key == ord('q'):
                break
    cap.release()
    if FLAGS.save:
        video_writer.release()
    cv2.destroyAllWindows()
# Fragment: tail arguments of a cv2.VideoWriter(...) call whose opening line
# is outside this chunk, followed by the main capture/recognition loop.
    str(conf.data_path / 'recording.mov'),
    cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 6, (1280, 720))
while cap.isOpened():
    # start_time = time.time()
    _, frame = cap.read()
    if frame is None:
        break
    start_time = time.time()
    bbs, fcs = model.find_faces(frame, conf)
    for bb, fc in zip(bbs, fcs):
        # start_time = time.time()
        emb = model.get_feature(fc)
        name = get_name(emb, labels)
        frame = draw_box_name(bb, name, frame)
    # overlay the measured frames-per-second on the frame
    cv2.putText(frame, 'FPS: ' + str(1.0 / (time.time() - start_time)),
                (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                cv2.LINE_AA)
    # cv2.imshow('face Capture', frame)
    print(name + ' FPS: ' + str(1.0 / (time.time() - start_time)))
    # save video
    if args.save:
        video_writer.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
if args.save:
    video_writer.release()
image = Image.open("/media/velab/dati/faces_emore/test/test2.jpeg") #image.show() #input("FIRST IMAGE") # faces rappresenta un arra bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size) #faces[0].show() #input("IMAGE CROPPED") bboxes = bboxes[:, : -1] # shape:[10,4],only keep 10 highest possibiity image bboxes = bboxes.astype(int) bboxes = bboxes + [-1, -1, 1, 1] # personal choice results, score = learner.infer(conf, faces, targets, args.tta) print(results, score) input("RESULT") image_cv = numpy.array(image) image_cv = image_cv[:, :, ::-1].copy() for idx, bbox in enumerate(bboxes): if args.score: image_cv = draw_box_name( bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), image_cv) else: image_cv = draw_box_name(bbox, names[results[idx] + 1], image_cv) cv2.imshow('face Capture', image_cv) cv2.waitKey()
def DDeeP():
    """Capture a frame, blur unknown faces, and drive server-side register /
    confirm / rename / update / delete actions from keyboard input.

    NOTE(review): the 'c'/'n'/'u'/'d' branches call cv2.waitKey(0), which
    blocks until a key is pressed — confirm this is the intended UX.
    """
    isSuccess, frame = cap.read()
    if isSuccess:
        try:
            # `name` is shared with later key-handler branches below
            global name
            image = Image.fromarray(frame)
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, : -1]  # shape:[10,4],only keep 10 highest possibiity faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            face_list = []
            for idx, bbox in enumerate(bboxes):
                face_list.append(np.array(faces[idx]).tolist())
            URL = server + "register_check"
            json_feed_verify = {'face_list': face_list}
            response = requests.post(URL, json=json_feed_verify)
            check_list = response.json()["check_list"]
            for idx, bbox in enumerate(bboxes):
                if check_list[idx] == 'unknown':
                    # blur faces the server does not recognise
                    frame[bbox[1]:bbox[3], bbox[0]:bbox[2]] = cv2.blur(
                        frame[bbox[1]:bbox[3], bbox[0]:bbox[2]], (23, 23))
                else:
                    frame = draw_box_name(bbox, "known", frame)
            cv2.imshow("My Capture", frame)
        except:
            print("detect error")
        # press 't' to register the current face with the server
        if cv2.waitKey(1) & 0xFF == ord('t'):
            p = Image.fromarray(frame[..., ::-1])
            try:
                warped_face = np.array(mtcnn.align(p))[..., ::-1]
                re_img = mtcnn.align(p)
                tolist_face = np.array(re_img).tolist()
                # TODO: the name should be taken as user input here
                name = 'Seo Yeon'
                URL = server + "register"
                tolist_img = warped_face.tolist()
                json_feed = {'face_list': tolist_face, 'register_name': name}
                response = requests.post(URL, json=json_feed)
            except:
                print('no face captured')
        # press 'c' to read back the stored feature
        if cv2.waitKey(0) & 0xFF == ord('c'):
            URL = server + "ReadFeature"
            params = {'name': name}
            res = requests.get(URL, params=params)
            res = res.json()
            res = res['result']
            print(res)
        # press 'n' to rename the registration (a proper update UI is planned)
        if cv2.waitKey(0) & 0xFF == ord('n'):
            URL = server + 'update'
            new_name = 'NEW'
            params = {'old_name': name, 'new_name': new_name}
            name = new_name
            res = requests.get(URL, params=params)
            print(res.text)
        # press 'u' to update the registered face image
        if cv2.waitKey(0) & 0xFF == ord('u'):
            newpic = Image.fromarray(frame[..., ::-1])
            new_img = np.array(newpic).tolist()
            URL = server + 'update'
            json_feed = {'name': name, 'new_image': new_img}
            res = requests.post(URL, json=json_feed)
        # press 'd' to delete the registration
        if cv2.waitKey(0) & 0xFF == ord('d'):
            URL = server + 'delete'
            # TODO: parameterise this endpoint/name
            params = {'name': name}
            res = requests.delete(URL, params=params)
            print(res.text)
# merge the body box with the matched detection merged_box = merge_box(bodybbox, detected_box) # match for the previous frame using IOU # iou = np_vec_no_jit_iou(np.array([detected_box]), bboxes0) # max_iou, max_ind = np.max(iou, axis=1), np.argmax(iou, axis=1) # print(iou, max_ind) # if max_iou >= 0.5: # # we have an IoU over 0.5, then there's match. We use the previous frame information # identity = names0[max_ind[0]] # frame = draw_box_name(merged_box, identity, frame) # frame = draw_box_name(detected_box, identity, frame) # curr_names.append(identity) # else: # otherwise, look up in the face bank identity = names[results[matched_detection] + 1] frame = draw_box_name(merged_box, "", frame) frame = draw_box_name(detected_box, "", frame) curr_names.append(identity) curr_boxes.append(detected_box) out.append([ int(path.replace(".jpg", "")), identity, detected_box[0], detected_box[1], detected_box[2], detected_box[3], merged_box[0], merged_box[1], merged_box[2], merged_box[3] ]) else: face_bbox = openpose_face_bboxes[j] merged_box = merge_box(bodybbox, face_bbox) frame = draw_box_name(merged_box, "", frame) out.append([
# Fragment: tail of a frame-processing loop — recognise detected faces, show
# and record the frame, and honour the optional duration limit. The matching
# try/if/while statements are outside this chunk.
        except:
            # detection failed: treat as "no faces in this frame"
            bboxes = []
            faces = []
        if len(bboxes) == 0:
            print('no face')
            continue
        else:
            bboxes = bboxes[:, : -1]  # shape:[10,4],only keep 10 highest possibiity faces
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
            results, score = learner.infer(conf, faces, targets, True)
            for idx, bbox in enumerate(bboxes):
                if args.score:
                    frame = draw_box_name(
                        bbox,
                        names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                        frame)
                else:
                    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
            cv2.imshow('img', frame)
            cv2.waitKey(10)
            video_writer.write(frame)
    else:
        # frame read failed: leave the loop
        break
    if args.duration != 0:
        i += 1
        if i % 25 == 0:
            # assumes a 25 fps source — TODO confirm
            print('{} second'.format(i // 25))
        if i > 25 * args.duration:
            break
# Fragment: rebuild the facebank folder and capture face crops from the
# webcam ('t' saves a crop, 'q'/Esc quits). Setup above is not visible here.
shutil.rmtree(bank_path)
os.mkdir(bank_path)
cap = cv2.VideoCapture(0)
model = face_model.FaceModel(conf)
while cap.isOpened():
    isSuccess, frame = cap.read()
    key = cv2.waitKey(1) & 0xFF
    bbs, fcs = model.find_faces(frame, conf)
    # only save when exactly one face is visible
    if len(bbs) == 1:
        frame = draw_box_name(bbs[0], "", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q') or key == 27:
            break
        if key == ord('t'):
            img = fcs[0][0]
            # timestamped filename; ':' and ' ' are not filesystem-safe
            cv2.imwrite(bank_path + '/' + str('{}.jpg'.format(
                str(datetime.now())[:-7].replace(":", "-").replace(" ", "-"))), img)
            print('da chup')
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    cv2.imshow('camera', frame)
cap.release()
cv2.destroyAllWindows()
def test(self, conf, img_dir, update=False, view_score=False, view_error=False):
    """Evaluate recognition accuracy over a directory of labelled images.

    img_dir: root whose sub-directory names are the ground-truth labels.
    update: rebuild the facebank before testing.
    view_score: draw the match score next to each predicted name.
    view_error: pretty-print the failure list at the end.
    """
    # Load models
    mtcnn = MTCNN()
    learner = face_learner(conf, True)
    if conf.device.type == 'cpu':
        learner.load_state(conf, 'cpu_final.pth', True, True)
    else:
        learner.load_state(conf, 'final.pth', True, True)
    learner.model.eval()
    # Load facebank
    if update:
        targets, names = prepare_facebank(conf, learner.model, mtcnn, False)
        print('facebank updated')
    else:
        targets, names = load_facebank(conf)
        print('facebank loaded')
    # Load image list
    img_list = glob(img_dir + '**/*.jpg')
    acc = 0
    detect_err = 0
    fails = []
    print(f"{'Found':^15}{'Name':^20}{'Result':^15}{'Score':^15}")
    pbar = enumerate(img_list)
    pbar = tqdm(pbar, total=len(img_list))
    for i, x in pbar:
        preds = []
        # Bug fix: `bboxes` was unbound at `f = len(bboxes)` below when
        # detection raised on the very first image.
        bboxes = []
        label = str(os.path.dirname(x))
        label = os.path.basename(label)
        image = Image.open(x)
        frame = cv2.imread(x, cv2.IMREAD_COLOR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        try:
            bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
            bboxes = bboxes[:, :-1]  # drop confidence column -> shape [N, 4]
            bboxes = bboxes.astype(int)
            bboxes = bboxes + [-1, -1, 1, 1]  # slightly enlarge each box
            results, score = learner.infer(conf, faces, targets, False)
            for idx, bbox in enumerate(bboxes):
                # Bug fix: the original referenced undefined `Label` here.
                print(f'{label}: {score[idx]}')
                if view_score:
                    frame = draw_box_name(
                        bbox,
                        names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                        frame)
                else:
                    frame = draw_box_name(bbox, names[results[idx] + 1], frame)
                preds.append(names[results[idx] + 1])
            if label in preds:
                acc += 1
            else:
                fails.append([label, preds])
            # Image.fromarray(frame,'RGB').show()
        except Exception as ex:
            fails.append([label, ex])
            detect_err += 1
        f = len(bboxes)
        tf = str(True if label in preds else False)
        t = f'{f:^15}{label:^20}{tf:^15}{acc/(i+1):^15.4}'
        pbar.set_description(t)
    if detect_err > 0:
        print(f'Detect Error: {detect_err}')
        if view_error:
            pp(fails)
        else:
            # Bug fix: the message misspelled the parameter as "veiw_error".
            print(f'If you want to see details, make view_error True.')
    print(f'Accuracy: {acc/len(img_list)}')
# Fragment: recognise faces in five pre-loaded images img[0..4] and save the
# annotated copies (img/mtcnn/learner/targets/names/args are set up elsewhere).
for i in range(5):
    try:
        image = Image.fromarray(img[i][..., ::-1])  # bgr to rgb
        # image = Image.fromarray(img[i])
        print('----------------------------------')
        bboxes, faces = mtcnn.align_multi(image, conf.face_limit, conf.min_face_size)
        bboxes = bboxes[:, : -1]  # shape:[10,4],only keep 10 highest possibiity faces
        bboxes = bboxes.astype(int)
        bboxes = bboxes + [-1, -1, 1, 1]  # personal choice
        results, score = face_compare(conf, learner.model, faces, targets, args.tta)
        # len(results) equals the number of faces, hence the name num_face
        num_face = len(results)
        print(num_face)
        for idx, bbox in enumerate(bboxes):
            # args.score is usually False, so the score-annotated drawing was
            # deliberately placed in the else branch
            if args.score:
                img[i] = draw_box_name(bbox, names[results[idx] + 1], img[i])
            else:
                img[i] = draw_box_name(
                    bbox,
                    names[results[idx] + 1] + '_{:.2f}'.format(score[idx]),
                    img[i])
    except:
        print('detect error')
    cv2.imwrite('data/output/img_{}.jpg'.format(i), img[i])