def Catchvideo(WindowName, CameraId):
    CNN = Model(64 * 64 * 1, 2)
    cv2.namedWindow(WindowName)
    cap = cv2.VideoCapture(CameraId)
    sess = tf.Session()
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    minsize = 40                  # minimum size of face
    threshold = [0.6, 0.7, 0.9]   # three steps' threshold
    factor = 0.709                # scale factor
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # convert BGR to RGB before handing the frame to MTCNN
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        bounding_boxes, points = detect_face.detect_face(
            img, minsize, pnet, rnet, onet, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        for b in bounding_boxes:
            # crop the detected face from the original frame
            img = frame[int(b[1]):int(b[3]), int(b[0]):int(b[2])]
            result = CNN.mode(False, img)
            result = result[0]
            # class 0 = "me"; draw a box and label only for positive matches
            if result[0] > result[1]:
                cv2.rectangle(frame, (int(b[0]), int(b[1])),
                              (int(b[2]), int(b[3])), (0, 255, 0))
                cv2.putText(frame, 'me', (int(b[0]) + 5, int(b[1]) + 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (225, 0, 225), 4)
        cv2.imshow(WindowName, frame)
        if cv2.waitKey(10) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
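For reference, the recognition loop above could be started as shown below; the window title and camera index 0 are placeholders, not values required by the code:

if __name__ == '__main__':
    # camera index 0 is usually the default webcam; press 'q' to quit
    Catchvideo('recognition', 0)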
def Catchvideo(Windowname, cameraId, NumPic, Path):
    cv2.namedWindow(Windowname)
    cap = cv2.VideoCapture(cameraId)
    sess = tf.Session()
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    num = 0
    minsize = 40                  # minimum size of face
    threshold = [0.6, 0.7, 0.9]   # three steps' threshold
    factor = 0.709                # scale factor
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        bounding_boxes, points = detect_face.detect_face(
            frame, minsize, pnet, rnet, onet, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        # skip frames containing more than one face
        if len(bounding_boxes) > 1:
            continue
        for b in bounding_boxes:
            img = frame[int(b[1]):int(b[3]), int(b[0]):int(b[2])]
            # skip crops that are too small to be useful as training data
            if len(img) < 60:
                continue
            imgname = Path + "/" + "%d.jpg" % num
            num += 1
            print(imgname)
            cv2.imwrite(imgname, img)
            cv2.rectangle(frame, (int(b[0]), int(b[1])),
                          (int(b[2]), int(b[3])), (0, 255, 0))
        cv2.imshow(Windowname, frame)
        if num > NumPic:
            break
        if cv2.waitKey(10) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
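The capture routine can then be pointed at a directory of your choosing; the path and image count below are only illustrative:

# collect up to 1000 face crops from camera 0 for the positive class
Catchvideo('capture', 0, 1000, './data/me')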
def to_rgb(img):
    # replicate a single-channel (grayscale) image into three channels
    w, h = img.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
    return ret

print('Creating networks and loading parameters')
gpu_memory_fraction = 1.0
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

# restore the facenet model
print('Building the facenet embedding model')
tf.reset_default_graph()
sess = tf.Session()
model_checkpoint_path = './facenet/model_check_point/model-20170512-110547.ckpt-250000'
saver = tf.train.import_meta_graph(
    './facenet/model_check_point/model-20170512-110547.meta')
saver.restore(sess, model_checkpoint_path)
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
    "phase_train:0")
print('facenet embedding model ready')
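With the graph restored, an embedding is computed by feeding a face crop to images_placeholder and running the embeddings tensor. A minimal sketch follows; it relies on the sess, images_placeholder, phase_train_placeholder, and embeddings defined above, and assumes the 160×160 input size and prewhitening step conventionally used with this facenet checkpoint. The face_embedding helper name is my own.

def face_embedding(sess, crop):
    # crop: an RGB face image cropped by MTCNN (any size)
    # resize to the 160x160 input expected by the facenet model
    img = cv2.resize(crop, (160, 160)).astype(np.float32)
    # prewhiten: zero-mean, unit-variance normalization used by facenet
    img = (img - np.mean(img)) / np.maximum(np.std(img), 1.0 / np.sqrt(img.size))
    feed_dict = {images_placeholder: img.reshape(1, 160, 160, 3),
                 phase_train_placeholder: False}
    # returns a (1, 128) L2-normalized embedding vector
    return sess.run(embeddings, feed_dict=feed_dict)

Two embeddings from the same person should then lie close together in Euclidean distance, which is what the downstream classifier or distance threshold exploits.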