# time.sleep(1)
curTime = time.time()  # calc fps
find_results = []
frame = frame[:, :, 0:3]

boxes, scores = face_detector.detect(frame)
face_boxes = boxes[np.argwhere(scores > 0.3).reshape(-1)]
face_scores = scores[np.argwhere(scores > 0.3).reshape(-1)]
print('Detected_FaceNum: %d' % len(face_boxes))

if len(face_boxes) > 0:
    for i in range(len(face_boxes)):
        box = face_boxes[i]
        cropped_face = frame[box[0]:box[2], box[1]:box[3], :]
        cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)
        feature = face_recognition.recognize(cropped_face)
        name = face_classfier.classify(feature)

        cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)

        # plot result idx under box
        text_x = box[1]
        text_y = box[2] + 20
        cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    1, (0, 0, 255), thickness=1, lineType=2)
else:
    print('Unable to align')

sec = curTime - prevTime
prevTime = curTime
fps = 1 / sec
curTime = time.time()  # calc fps
find_results = []
frame = frame[:, :, 0:3]

if skip == 15:
    boxes, scores = face_detector.detect(frame)
    face_boxes = boxes[np.argwhere(scores > 0.3).reshape(-1)]
    face_scores = scores[np.argwhere(scores > 0.3).reshape(-1)]
    print('Detected_FaceNum: %d' % len(face_boxes))

    if len(face_boxes) > 0:
        for i in range(len(face_boxes)):
            box = face_boxes[i]
            cropped_face = frame[box[0]:box[2], box[1]:box[3], :]
            cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)
            feature = face_recognition.recognize(cropped_face, mobilenet=mobilenet)
            print("len features {}".format(len(feature)))
            ind, probab = face_classfier.ensemble(feature)
            name = index_to_name(ind)
            print(name, probab)

            # Previous nearest-mean matching approach (disabled):
            # sum = np.load('sum.npy')
            # print(np.shape(sum))
            # names = ['ayush', 'gautham', 'lokesh', 'milind', 'nishant', 'pranjal', 'rishhanth', 'sumanth']
            # diff = sum - feature
            # res = np.linalg.norm(diff, axis=1)
            # print(res)
            # res_ind = np.argmin(res)
            # if res[res_ind] <= 0.5:
            #     y_pred = names[res_ind]
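# index_to_name() is called above but not defined in this snippet. A minimal sketch,
# assuming the class names were saved to disk during training; the file name
# 'class_names.npy' and the index ordering are assumptions, not part of the original code.
import numpy as np

def index_to_name(ind, names_path='class_names.npy'):
    """Map a classifier output index back to a person's name (hypothetical helper)."""
    names = np.load(names_path, allow_pickle=True)
    if 0 <= ind < len(names):
        return str(names[ind])
    return 'unknown'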
import cv2
import numpy as np

from recognition.FaceRecognition import FaceRecognition
from detection.FaceDetector import FaceDetector

face_detector = FaceDetector()
face_recognition = FaceRecognition()

image_files = ['./media/1.jpg', './media/2.jpg']

for input_str in image_files:
    img = cv2.imread(input_str)
    boxes, scores = face_detector.detect(img)
    # reshape(-1) keeps the index array 1-D even when only one face passes the threshold
    face_boxes = boxes[np.argwhere(scores > 0.5).reshape(-1)]
    print('Number of faces in image:', len(face_boxes))

    for box in face_boxes:
        cropped_face = img[box[0]:box[2], box[1]:box[3], :]
        cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)
        print('Face descriptor:')
        print(face_recognition.recognize(cropped_face), '\n')
        cv2.imshow('image', cropped_face)
        cv2.waitKey(0)

cv2.destroyAllWindows()
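# The descriptors printed above can be compared directly. A minimal sketch of a
# same-person check using Euclidean distance; the 0.5 threshold mirrors the one in the
# disabled matching code earlier and may need tuning for the model actually in use.
import numpy as np

def is_same_person(desc_a, desc_b, threshold=0.5):
    """Return True if two face descriptors are closer than the distance threshold."""
    return np.linalg.norm(np.asarray(desc_a) - np.asarray(desc_b)) <= threshold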
def main(args):
    face_detector = FaceDetector()
    face_recognition = FaceRecognition(args.model)
    face_classfier = FaceClassifier(args.classifier_filename)
    video_capture = cv2.VideoCapture(args.video_input)

    output_file = './media/result/' + os.path.basename(args.video_input) + '_result.avi'
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(output_file, fourcc, 24.0,
                          (int(video_capture.get(3)), int(video_capture.get(4))))

    print('Start Recognition!')
    prevTime = 0
    while video_capture.isOpened():
        ret, frame = video_capture.read()
        if not ret:  # end of video or read failure
            break

        curTime = time.time()  # calc fps
        find_results = []
        frame = frame[:, :, 0:3]

        boxes, scores = face_detector.detect(frame)
        face_boxes = boxes[np.argwhere(scores > 0.3).reshape(-1)]
        face_scores = scores[np.argwhere(scores > 0.3).reshape(-1)]
        print('Detected_FaceNum: %d' % len(face_boxes))

        if len(face_boxes) > 0:
            for i in range(len(face_boxes)):
                box = face_boxes[i]
                cropped_face = frame[box[0]:box[2], box[1]:box[3], :]
                cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)
                feature = face_recognition.recognize(cropped_face)
                name = face_classfier.classify(feature)

                cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)

                # plot result idx under box
                text_x = box[1]
                text_y = box[2] + 20
                cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                            1, (0, 0, 255), thickness=1, lineType=2)
        else:
            print('Unable to align')

        sec = curTime - prevTime
        prevTime = curTime
        fps = 1 / sec
        fps_str = 'FPS: %2.3f' % fps  # avoid shadowing the built-in str
        text_fps_x = len(frame[0]) - 150
        text_fps_y = 20
        cv2.putText(frame, fps_str, (text_fps_x, text_fps_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    1, (0, 0, 0), thickness=1, lineType=2)

        out.write(frame)

    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
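# main() expects an args object with model, classifier_filename and video_input
# attributes. A minimal command-line entry point sketch; the flag names below simply
# mirror those attribute names and are an assumption, not part of the original file.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Run face recognition on a video file.')
    parser.add_argument('--model', help='Path to the face recognition model.')
    parser.add_argument('--classifier_filename', help='Path to the trained classifier (.pkl).')
    parser.add_argument('--video_input', help='Path to the input video file.')
    main(parser.parse_args())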
print('Number of classes: %d' % len(dataset))
print('Number of images: %d' % len(paths))

# Run forward pass to calculate embeddings
print('Calculating features for images')
image_size = 160
nrof_images = len(paths)
features = np.zeros((2 * nrof_images, 512))
labels = np.asarray(labels).repeat(2)  # each image contributes two embeddings (original + flip)

for i in range(nrof_images):
    img = cv2.imread(paths[i])
    if img is None:
        print('Open image file failed: ' + paths[i])
        continue

    boxes, scores = face_detector.detect(img)
    if len(boxes) == 0 or scores[0] < 0.5:
        print('No face found in ' + paths[i])
        continue

    cropped_face = img[boxes[0][0]:boxes[0][2], boxes[0][1]:boxes[0][3], :]
    # resize to image_size, matching the crops fed to recognize() in the other scripts
    cropped_face = cv2.resize(cropped_face, (image_size, image_size), interpolation=cv2.INTER_AREA)
    cropped_face_flip = cv2.flip(cropped_face, 1)  # horizontally flipped copy for augmentation
    features[2 * i, :] = face_recognition.recognize(cropped_face)
    features[2 * i + 1, :] = face_recognition.recognize(cropped_face_flip)

print('Start training for images')
face_classfier.train(features, labels, model='svm',
                     save_model_path='./classifier/trained_classifier.pkl')
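# The dataset, paths and labels variables used above are assumed to come from a
# directory of per-person image folders. A minimal sketch of such a loader; the
# helper name and folder layout are assumptions, not the original loading code.
import os

def load_dataset(data_dir):
    """Return (class_names, image_paths, integer_labels) from data_dir/<person>/<image>."""
    class_names = sorted(d for d in os.listdir(data_dir)
                         if os.path.isdir(os.path.join(data_dir, d)))
    paths, labels = [], []
    for label, name in enumerate(class_names):
        for fname in os.listdir(os.path.join(data_dir, name)):
            paths.append(os.path.join(data_dir, name, fname))
            labels.append(label)
    return class_names, paths, labels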
nrof_images = len(paths)
features = np.zeros((2 * nrof_images, args.embedding_size))
labels = np.asarray(labels).repeat(2)  # each image contributes two embeddings (original + flip)

for i in range(nrof_images):
    img = cv2.imread(paths[i])
    if img is None:
        print('Open image file failed: ' + paths[i])
        continue

    boxes, scores = face_detector.detect(img)
    if len(boxes) == 0 or scores[0] < 0.5:
        print('No face found in ' + paths[i])
        continue

    cropped_face = img[boxes[0][0]:boxes[0][2], boxes[0][1]:boxes[0][3], :]
    cropped_face_flip = cv2.flip(cropped_face, 1)  # horizontally flipped copy for augmentation
    features[2 * i, :] = face_recognition.recognize(cropped_face, mobilenet=mobilenet)
    features[2 * i + 1, :] = face_recognition.recognize(cropped_face_flip, mobilenet=mobilenet)

np.save('features', features)
np.save('labels', labels)

# Trained on 8 classes, 1110 images. Accuracy: 0.9617117117117117
# Per-class precision:
#   [0.98387097, 1., 0.82461538, 0.9887218, 0.98245614, 0.9858156, 0.95955882, 0.99618321]
# Per-class recall:
#   [0.87142857, 1., 0.97101449, 0.99621212, 0.9929078, 0.99285714, 0.93884892, 0.93214286]
# Per-class F1 score:
#   [0.92424242, 1., 0.89184692, 0.99245283, 0.98765432, 0.98932384, 0.94909091, 0.96309963]
# Per-class support:
#   [280, 280, 276, 264, 282, 280, 278, 280]

print('Start training for images')
face_classfier.train(features, labels, model=model_type,
                     save_model_path='./classifier/trained_classifier.pkl')  # save path assumed; mirrors the SVM training script above
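# The per-class numbers quoted above have the shape of sklearn's
# precision_recall_fscore_support output. A minimal sketch of producing such a report
# from the saved feature/label arrays; the held-out split and the linear SVM below are
# assumptions, since the original evaluation code is not shown here.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.svm import SVC

features = np.load('features.npy')
labels = np.load('labels.npy')
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.25, stratify=labels)

clf = SVC(kernel='linear', probability=True).fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('accuracy:', accuracy_score(y_test, y_pred))
print(precision_recall_fscore_support(y_test, y_pred))  # (precision, recall, f1, support)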