def index1():
    # print request.json
    imgURL = request.json["photourl"]
    print("URL : " + imgURL)
    p = FaceDetect(imgURL)
    a = p.detect()
    return jsonify({'id': a}), 201
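# Usage sketch (an assumption, not code from the original project): index1() reads
# request.json and returns jsonify(...), so it is presumably registered as a Flask
# view. The app setup, route path and method below are illustrative only.
from flask import Flask, request, jsonify

app = Flask(__name__)
app.add_url_rule("/detect", view_func=index1, methods=["POST"])

if __name__ == "__main__":
    app.run(debug=True)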
def main():
    if not os.path.isdir(training_set):
        print("Cropping faces and making train and test sets")
        face = FaceDetect()
        face.face_return()
        train_test_split.split()

    print("Training....")
    train_set = Eigen(training_set)
    train_image_label = train_set.label_extract()
    train_stacked_images, mean_face = train_set.image_processing()
    _, eig_face = train_set.eigen_value()

    # select the number of eigenfaces; the leading ones are already the largest, so no sorting is needed
    eig_face = eig_face[:, :no_of_eigfaces]
    train_weights, recons_train = train_set.weights_calculation(eig_face, mean_face)

    # display selected eigenfaces
    # train_set.display_data(eig_face, 'Selected Eigen faces')
    # display original images
    # train_set.display_data(train_stacked_images, 'Original faces')
    # display reconstructed training faces
    # train_set.display_data(recons_train, 'reconstructed training data')

    print("Training finished, testing ......")
    test_set = Eigen(testing_set)
    test_image_label = test_set.label_extract()
    print('Original label:', test_image_label)
    test_stacked_images, _ = test_set.image_processing(mean_face)
    test_weights, recons_test = test_set.weights_calculation(eig_face, mean_face)

    # display original test faces
    test_set.display_data(test_stacked_images, 'original testfaces')
    # display reconstructed test faces
    test_set.display_data(recons_test, 'reconstructed test data')

    test = Test(train_stacked_images, train_weights, test_weights)
    predicted_label = test.match_index(train_image_label)
    print('Predicted label:', predicted_label)

    match_check = [
        1 if tl == pl else 0
        for tl, pl in zip(test_image_label, predicted_label)
    ]
    print(match_check)
    print("Accuracy:", sum(match_check) / len(predicted_label))
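# Illustrative sketch (not part of the project above): the weights_calculation step in
# an Eigenfaces pipeline is conventionally a projection of mean-centred face vectors
# onto the selected eigenfaces, followed by a reconstruction from those weights. The
# function and array names below are assumptions chosen to mirror main().
import numpy as np

def project_and_reconstruct(stacked_images, mean_face, eig_face):
    """stacked_images: (n_pixels, n_images), mean_face: (n_pixels, 1),
    eig_face: (n_pixels, n_eigenfaces) -- shapes assumed for illustration."""
    centred = stacked_images - mean_face             # remove the average face
    weights = eig_face.T @ centred                   # (n_eigenfaces, n_images)
    reconstructed = eig_face @ weights + mean_face   # back-project into image space
    return weights, reconstructed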
from face_detect import FaceDetect x = FaceDetect() x.run()
def capture_and_mark(self):
    sl = StudentsList(self.class_name)
    students, roll_numbers = sl.load_pkl_file()
    FaceDetectObj = FaceDetect(self.class_name)
    Yes = True
    No = False
    Cancel = None
    i = 0
    while i <= 2:
        captured_image = None
        frame = None
        students_present = []
        # keep capturing until at least one face is recognized
        while len(students_present) == 0:
            captured_image, frame = capture()
            students_present = FaceDetectObj.recognize(captured_image, roll_numbers)
        if students_present == "No Training Data":
            return
        try:
            name_student_present = students[roll_numbers.index(students_present[0])]
        except ValueError:
            messagebox.showerror(
                "Error",
                "Recognized student not in database\nUnable to mark attendance",
            )
            return
        response = messagebox.askyesnocancel(
            "Confirm your identity",
            students_present[0] + "\n" + name_student_present,
        )
        if response is Yes:
            wb = excel.attendance_workbook(self.class_name)
            excel.mark_present(wb, students_present, self.class_name)
            img_path = os.path.join(
                os.getcwd(),
                "images",
                self.class_name,
                "s" + students_present[0][-2:],
                os.path.basename(captured_image),
            )
            cv2.imwrite(img_path, frame)
            os.remove(captured_image)
            messagebox.showinfo(
                "Attendance Confirmation", "Your attendance is marked!"
            )
            break
        elif response is Cancel:
            break
        elif response is No:
            if i == 2:
                img_path = os.path.join(
                    os.getcwd(),
                    "images",
                    self.class_name,
                    "unrecognized students",
                    os.path.basename(captured_image),
                )
                cv2.imwrite(img_path, frame)
                messagebox.showinfo(
                    "Unrecognized Student",
                    "You were not recognized as any student of this class.\nYour attendance will be marked later if you really are",
                )
                os.remove(captured_image)
            i += 1
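# Hypothetical sketch of the capture() helper used above (its real definition is not
# shown here): capture_and_mark() expects it to return a path to a saved snapshot plus
# the raw frame, since it later calls os.path.basename()/os.remove() on the first value
# and cv2.imwrite() on the second. The save path below is an assumption.
import os
import cv2

def capture(save_path="captured.jpg"):
    cam = cv2.VideoCapture(0)        # default webcam
    ret, frame = cam.read()
    cam.release()
    if not ret:
        raise RuntimeError("Could not read a frame from the webcam")
    cv2.imwrite(save_path, frame)    # temporary file consumed by recognize()
    return save_path, frame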
def run(images_path="media/", filename="", num_of_results=1, hm_lvl=0, certainty=0, data_dir=DATA_DIR):
    """Execute face detection, then VGG-Face classification and finally Grad-CAM.

    :param filename: query image filename (default = "")
    :param images_path: query and output image path (default = "media/")
    :param data_dir: openCV directory path (default = DATA_DIR)
    :return TODO
    """
    res = Results(certainty)

    # make sure the query image actually exists
    file_path = os.path.join(images_path, filename)
    if not filename or not os.path.isfile(file_path):
        res.err_msg = "ERROR: cannot load input image {}".format(filename)
        res.err_code = 1
        message(res.err_msg)
        return res

    face = FaceDetect(data_dir, file_path)
    if not face.is_valid:
        res.err_msg = "ERROR: cannot load input image {}".format(filename)
        res.err_code = 1
        message(res.err_msg)
        return res

    face.load_cascades()
    if not face.is_loaded:
        res.err_msg = "ERROR: cannot load cascades from: {}".format(data_dir)
        res.err_code = 2
        message(res.err_msg)
        return res

    face.detect_face()
    if not face.has_face:
        res.err_msg = "ERROR: sorry, frontal face wasn't detected"
        res.err_code = 3
        message(res.err_msg)
        return res

    # Crop the face from the query image
    im = cv2.imread(file_path)
    # If more than one face was found, pick the biggest one (w * h)
    cropped_im = crop_rect(im, max(face.features, key=lambda f: f[2] * f[3]))
    cv2.imwrite(os.path.join(images_path, "cropped.jpg"), cropped_im)

    # Run the forward pass and Grad-CAM on the cropped image
    pred_labels, err_msg = classifier_gcam.predict(images_path, num_of_results)
    if (pred_labels is None) or (len(pred_labels) == 0):
        res.err_msg = err_msg
        res.err_code = 4
        message(res.err_msg)
        return res

    # Map the predicted labels from the Torch output to ids
    pred_ids = get_prediction_from_names(pred_labels)
    if len(pred_ids) == 0:
        res.err_msg = "ERROR: could not load names.txt file"
        res.err_code = 5
        message(res.err_msg)
        return res

    res.set_results(cropped_im, pred_ids[0], pred_labels[0], pred_ids[1:], hm_lvl)
    res.find_significant_features()
    return res
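# Usage sketch (the query filename is an assumption, and Results is assumed to
# initialise err_code to 0 on success): run() returns a Results object whose
# err_msg/err_code describe any failure in detection, cascade loading or prediction.
res = run(images_path="media/", filename="query.jpg", num_of_results=3, certainty=0.5)
if res.err_code:
    print("Lookup failed:", res.err_msg)
else:
    print("Lookup succeeded; the cropped face and predicted labels are stored in res")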
import os

import cv2
import numpy as np

from face_detect import FaceDetect  # assumed import path, matching the usage above


class FaceRecognition():

    def __init__(self):
        self.face_detector = FaceDetect()
        self.face_recognizer = cv2.face.LBPHFaceRecognizer_create()  # TODO choose a recognizer
        # or use EigenFaceRecognizer by replacing the line above with
        # face_recognizer = cv2.face.EigenFaceRecognizer_create()
        # or use FisherFaceRecognizer by replacing the line above with
        # face_recognizer = cv2.face.FisherFaceRecognizer_create()
        self.faces = []
        self.labels = []
        self.label_dict = {}
        self.avg_face_dim = ()

    def load_training_data(self, path):
        if os.path.isdir(path):
            n_trainees = 1
            for training_dir in os.listdir(path):
                label = os.path.basename(training_dir)
                self.label_dict[n_trainees] = label
                # TODO parallelize this for a speed-up later
                for f in os.listdir(path + '/' + training_dir):
                    # build the correct path
                    img_path = '/'.join([path, training_dir, f])
                    # detect the face; assume there is only one and take the first
                    bbox, face = self.face_detector.in_file(img_path)
                    # append to the respective lists
                    self.faces.append(face[0])
                    self.labels.append(n_trainees)
                n_trainees += 1

    def uniform_faces(self):
        # collect the dimensions of all faces
        face_dims = [f.shape for f in self.faces]
        # get the average value
        self.avg_face_dim = tuple(np.mean(face_dims, axis=0).astype(int))
        # resize all faces to the same size
        self.faces = [cv2.resize(face, self.avg_face_dim) for face in self.faces]

    def train(self, path):
        # load training data if none has been loaded yet
        if not self.faces and not self.labels:
            self.load_training_data(path)
            self.uniform_faces()
        # train
        if self.faces and self.labels and len(self.faces) == len(self.labels):
            self.face_recognizer.train(self.faces, np.array(self.labels))

    def predict_face(self, face_img):
        img_copy = face_img.copy()
        if len(img_copy.shape) == 3:
            img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
        label, confidence = self.face_recognizer.predict(img_copy)
        return label, confidence

    def predict_img(self, img):
        # find face(s) in the image
        bbox, faces = self.face_detector.in_frame(img)
        # make the face the same size as the training data
        if self.avg_face_dim:
            faces[0] = cv2.resize(faces[0], self.avg_face_dim)
        # predict
        label, confidence = self.predict_face(faces[0])
        return label, confidence

    def predict_file(self, fname):
        img = cv2.imread(fname)
        label, confidence = self.predict_img(img)
        return label, confidence
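# Usage sketch (the directory layout is an assumption: one sub-folder per person under
# "training_data/", e.g. training_data/alice/*.jpg): train the recognizer, then map the
# numeric prediction back to a name through label_dict.
recognizer = FaceRecognition()
recognizer.train("training_data")

label, confidence = recognizer.predict_file("test_images/unknown.jpg")
print("Predicted:", recognizer.label_dict.get(label), "with confidence", confidence)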