def gen_onet_data(data_dir, anno_file, pnet_model_file, rnet_model_file,
                  prefix_path='', use_cuda=True, vis=False):
    """Run the P-Net + R-Net stages over an annotated image list, pickle the
    R-Net detections, then generate O-Net training samples from them.

    Args:
        data_dir: root directory for the generated sample data.
        anno_file: annotation file listing the images to process.
        pnet_model_file: path to the trained P-Net weights.
        rnet_model_file: path to the trained R-Net weights.
        prefix_path: optional prefix prepended to image paths in anno_file.
        use_cuda: run the networks on GPU when True.
        vis: when True, visualize the R-Net boxes for each image.
    """
    pnet, rnet, _ = create_mtcnn_net(p_model_path=pnet_model_file,
                                     r_model_path=rnet_model_file,
                                     use_cuda=use_cuda)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, min_face_size=12)

    imagedb = ImageDB(anno_file, mode="test", prefix_path=prefix_path)
    imdb = imagedb.load_imdb()
    image_reader = TestImageLoader(imdb, 1, False)

    all_boxes = list()
    for batch_idx, im in enumerate(image_reader):
        if batch_idx % 100 == 0:
            print("%d images done" % batch_idx)

        # Stage 1: P-Net proposals; stage 2: refine them with R-Net.
        _, p_boxes_align = mtcnn_detector.detect_pnet(im=im)
        boxes, boxes_align = mtcnn_detector.detect_rnet(im=im,
                                                        dets=p_boxes_align)

        if boxes_align is None:
            # No face survived R-Net; keep all_boxes aligned with imdb.
            all_boxes.append(np.array([]))
            continue

        if vis:
            rgb_im = cv2.cvtColor(np.asarray(im), cv2.COLOR_BGR2RGB)
            vision.vis_two(rgb_im, boxes, boxes_align)

        all_boxes.append(boxes_align)

    save_path = config.MODEL_STORE_DIR
    # makedirs handles missing intermediate directories; os.mkdir would
    # fail if the parent of save_path does not exist.
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    save_file = os.path.join(save_path, "detections_%d.pkl" % int(time.time()))
    with open(save_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    gen_onet_sample_data(data_dir, anno_file, save_file, prefix_path)
def __init__(self):
    """Load the DFace MTCNN detector, temporarily chdir-ing into the bundled
    DFace directory so its relative model-store paths resolve.

    NOTE(review): the DFace folder location is derived from this class's
    module path (``Face.__module__``) — confirm if the package layout moves.
    """
    import os
    before_folder = os.path.abspath('.')
    os.chdir(
        os.path.join('/'.join(Face.__module__.split('.')[:-1]), 'DFace'))
    try:
        from dface.core.detect import create_mtcnn_net, MtcnnDetector
        pnet, rnet, onet = create_mtcnn_net(
            p_model_path="./model_store/pnet_epoch.pt",
            r_model_path="./model_store/rnet_epoch.pt",
            o_model_path="./model_store/onet_epoch.pt",
            use_cuda=False)
        self.detector = MtcnnDetector(
            pnet=pnet, rnet=rnet, onet=onet, min_face_size=64)
    finally:
        # Restore the caller's working directory even if model loading
        # raises — otherwise the process is left chdir'd into DFace.
        os.chdir(before_folder)
import cv2
from dface.core.detect import create_mtcnn_net, MtcnnDetector
import dface.core.vision as vision
import numpy as np
import torch

if __name__ == '__main__':
    # Build the full three-stage MTCNN cascade on the GPU.
    p_net, r_net, o_net = create_mtcnn_net(
        p_model_path="./model_store/pnet_epoch.pt",
        r_model_path="./model_store/rnet_epoch.pt",
        o_model_path="./model_store/onet_epoch.pt",
        use_cuda=True)
    detector = MtcnnDetector(pnet=p_net, rnet=r_net, onet=o_net,
                             min_face_size=24)

    # OpenCV loads BGR; keep an RGB copy for visualization.
    image = cv2.imread("./test.jpg")
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        boxes, landmarks = detector.detect_face(image)

    vision.vis_face(image_rgb, boxes, landmarks)
def face_detection():
    """Capture webcam frames, detect faces with MTCNN, classify each face
    crop with CNet, and publish the most recent class id through the global
    ``face_out``. Sets the global ``ready`` flag each frame. Press 'q' in
    the display window to stop.
    """
    global face_out
    global ready

    # Face detection model — validate the paths BEFORE loading, and use
    # explicit raises (asserts are stripped under `python -O`).
    p_model_path = "./weights/pnet_epoch.pt"
    r_model_path = "./weights/rnet_epoch.pt"
    o_model_path = "./weights/onet_epoch.pt"
    for model_path in (p_model_path, r_model_path, o_model_path):
        if not os.path.exists(model_path):
            raise FileNotFoundError("%s Model Path is not exist!" % model_path)
    pnet, rnet, onet = create_mtcnn_net(p_model_path, r_model_path,
                                        o_model_path, use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet,
                                   min_face_size=24)

    # Face classification model.
    cls_model_path = './weights/cnet_final.pth'
    if not os.path.exists(cls_model_path):
        raise FileNotFoundError("Cls Model Path is not exist!")
    net = CNet()
    net.load_state_dict(torch.load(cls_model_path))
    net.eval()  # inference mode: freeze dropout / batch-norm statistics
    transform = ToTensor()

    # Open the default camera.
    capture = cv2.VideoCapture(0)
    i = 0
    while True:
        ready = True
        print(ready)
        ret, frame = capture.read()

        faces, _ = mtcnn_detector.detect_face(frame)
        for (top_x, top_y, bottom_x, bottom_y, score) in faces:
            i = i + 1
            # Clamp to the frame: negative detector coordinates would
            # otherwise wrap around and produce a wrong/empty slice.
            top_x = max(int(top_x), 0)
            top_y = max(int(top_y), 0)
            bottom_x = int(bottom_x)
            bottom_y = int(bottom_y)
            # Draw the detection rectangle.
            cv2.rectangle(frame, (top_x, top_y), (bottom_x, bottom_y),
                          (0, 255, 0), 2)

            face_crop = frame[top_y:bottom_y, top_x:bottom_x, :]
            try:
                cls_input = transform(
                    cv2.resize(face_crop, (28, 28))).unsqueeze(0)
            except cv2.error:
                # Degenerate/empty crop — resize failed; skip this box.
                continue
            with torch.no_grad():
                out = net(cls_input)
            cls = torch.argmax(out, dim=1)
            print((top_x, top_y, bottom_x, bottom_y), '\t', cls.item())
            face_out = cls.item()

        # Show the annotated frame; 'q' quits.
        cv2.imshow("faces in video", frame)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

    # Release the camera and close all windows.
    capture.release()
    cv2.destroyAllWindows()
import cv2
from dface.core.detect import create_mtcnn_net, MtcnnDetector
import dface.core.vision as vision

if __name__ == '__main__':
    # Face detection model weights (from https://github.com/kuaikuaikim/DFace.git).
    # CPU inference here; set use_cuda=True for the GPU version.
    p_net, r_net, o_net = create_mtcnn_net(
        p_model_path="./weights/pnet_epoch.pt",
        r_model_path="./weights/rnet_epoch.pt",
        o_model_path="./weights/onet_epoch.pt",
        use_cuda=False)
    detector = MtcnnDetector(pnet=p_net, rnet=r_net, onet=o_net,
                             min_face_size=24)

    # Detect on the BGR frame, visualize on an RGB copy.
    image = cv2.imread("./test.jpg")
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    boxes, landmarks = detector.detect_face(image)
    vision.vis_face(image_rgb, boxes, landmarks)
import cv2
from dface.core.detect import create_mtcnn_net, MtcnnDetector
import dface.core.vision as vision

if __name__ == '__main__':
    # CPU cascade using the `use_tucker2` O-Net variant (compressed model,
    # per the flag and checkpoint name).
    p_net, r_net, o_net = create_mtcnn_net(
        p_model_path="./model_store/pnet_epoch_1.pt",
        r_model_path="./model_store/rnet_epoch_1.pt",
        o_model_path="./model_store/onet_epoch_10_tucker2.pt",
        use_cuda=False,
        use_tucker2=True)
    detector = MtcnnDetector(pnet=p_net, rnet=r_net, onet=o_net,
                             min_face_size=24)

    # Detect on the BGR frame, visualize on an RGB copy.
    image = cv2.imread("./test.jpg")
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    boxes, landmarks = detector.detect_face(image)
    vision.vis_face(image_rgb, boxes, landmarks)
#暂停窗口 if cv2.waitKey(5) & 0xFF == ord('q'): break #释放资源 capture.release() #销毁窗口 cv2.destroyAllWindows() if __name__ == "__main__": #face detection model p_model_path = "./weights/pnet_epoch.pt" r_model_path = "./weights/rnet_epoch.pt" o_model_path = "./weights/onet_epoch.pt" pnet, rnet, onet = create_mtcnn_net(p_model_path, r_model_path, o_model_path, use_cuda=True) mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet, min_face_size=24) assert os.path.exists(p_model_path), "pnet Model Path is not exist!" assert os.path.exists(r_model_path), "rnet Model Path is not exist!" assert os.path.exists(o_model_path), "onet Model Path is not exist!" #face classification model cls_model_path = './weights/cnet_final.pth' assert os.path.exists(cls_model_path), "Cls Model Path is not exist!" net = CNet() net.load_state_dict(torch.load(cls_model_path)) transform = ToTensor()
# NOTE(review): chunk starts mid-script — `out_dir`, `args`, `hopenet` and
# `snapshot_path` are defined above this view; the makedirs call may
# originally sit under an `if not os.path.exists(out_dir):` guard — confirm.
os.makedirs(out_dir)

# Abort early if the input video is missing.
if not os.path.exists(args.video_path):
    sys.exit('Video does not exist')

# ResNet50 structure: Hopenet with Bottleneck blocks, [3, 4, 6, 3] layout
# and 66 output units (presumably angle bins — confirm against Hopenet).
model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3],
                        66)

# Dlib face detection model (replaced by the DFace MTCNN detector below).
# cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)
pnet, rnet, onet = create_mtcnn_net(
    p_model_path=
    "/home/nihar/Desktop/HRIProject/DFace/model_store/pnet_epoch.pt",
    r_model_path=
    "/home/nihar/Desktop/HRIProject/DFace/model_store/rnet_epoch.pt",
    o_model_path=
    "/home/nihar/Desktop/HRIProject/DFace/model_store/onet_epoch.pt",
    use_cuda=True)
mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet,
                               min_face_size=24)

print('Loading snapshot.')
# Load snapshot weights into the Hopenet model.
saved_state_dict = torch.load(snapshot_path)
model.load_state_dict(saved_state_dict)

print('Loading data.')
# NOTE(review): chunk starts mid-function — this `return` is the fallback
# tail of a function defined above this view (returns the label 'others').
    return 'others'


if __name__ == '__main__':
    # Dlib face recognition model and 5-point landmark shape predictor.
    facerec = dlib.face_recognition_model_v1(
        './dlib_face_recognition_resnet_model_v1.dat')
    sp = dlib.shape_predictor('./shape_predictor_5_face_landmarks.dat')

    # refer to your local model path
    p_model = "./model_store/pnet_epoch.pt"
    r_model = "./model_store/rnet_epoch.pt"
    o_model = "./model_store/onet_epoch.pt"
    # use cpu version set use_cuda=False, if you want to use gpu version set use_cuda=True
    pnet, rnet, onet = create_mtcnn_net(p_model_path=p_model,
                                        r_model_path=r_model,
                                        o_model_path=o_model,
                                        use_cuda=False)
    mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet,
                                   min_face_size=24)

    # img = cv2.imread("./test.jpg")
    # b, g, r = cv2.split(img)
    # img2 = cv2.merge([r, g, b])

    # Webcam loop: detect faces and landmarks in each frame.
    capture = cv2.VideoCapture(0)
    j = 0
    while True:
        ret, frame = capture.read()
        bboxs, landmarks = mtcnn_detector.detect_face(frame)
        # NOTE(review): chunk is truncated here — the loop body continues
        # beyond this view.
import time
import numpy as np
from skimage import transform, img_as_float
from dface.core.detect import MtcnnDetector, create_mtcnn_net
import os
from YFYF.Tracking import WebcamVideoStream
import torch
import cv2

# Resolve all model paths relative to the YFYF data directory; fail with a
# clear message instead of a TypeError inside os.path.join when unset.
data_path = os.getenv('YFYF_data')
if data_path is None:
    raise RuntimeError("YFYF_data environment variable is not set")


def join(x):
    """Return *x* joined under the YFYF data directory."""
    return os.path.join(data_path, x)


pnet, rnet, onet = create_mtcnn_net(
    p_model_path=join("dface/model_store/pnet_epoch.pt"),
    r_model_path=join("dface/model_store/rnet_epoch.pt"),
    o_model_path=join("dface/model_store/onet_epoch.pt"),
    use_cuda=True)
mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet,
                               min_face_size=24)

stream = WebcamVideoStream()
stream.start()
try:
    while True:
        frame, dirty = stream.read()
        if not dirty:
            # No fresh frame since the last read — skip this iteration.
            continue
        rects, _ = mtcnn_detector.detect_face(frame)
        print(rects)
        cv2.imshow('frame', frame)
        # 'q' in the preview window exits the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Close the preview window even if detection raises mid-loop.
    cv2.destroyAllWindows()