import time

import cv2
import numpy as np
import tensorflow as tf

from FaceDetection import FaceDetector


class MaskDetector(object):
    def __init__(self, maskD_model_path, faceD_model_path):
        self.model_filepath = maskD_model_path
        self.face_detector = FaceDetector(faceD_model_path)
        self.interpreter, self.input_details, self.output_details = self.load_model_tflite(self.model_filepath)

    # loading the mask detection tflite model
    def load_model_tflite(self, model_path):
        interpreter = tf.lite.Interpreter(model_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        return interpreter, input_details, output_details

    # preprocessing after face detection
    def preprocess(self, frame):
        t0 = time.time()
        boxes = self.face_detector.detect_faces(frame)
        print('UltraLightWeight() - face detection time: {:.3f} seconds'.format(time.time() - t0))

        # if no face is detected, return None
        if len(boxes) == 0:
            return None

        # mask detection is performed on the face with the highest confidence score
        box = boxes[0]

        # clamp box coordinates to the frame boundaries
        if box['start_x'] < 0:
            box['start_x'] = 0
        if box['start_y'] < 0:
            box['start_y'] = 0
        if box['end_x'] > frame.shape[1]:
            box['end_x'] = frame.shape[1]
        if box['end_y'] > frame.shape[0]:
            box['end_y'] = frame.shape[0]

        face = frame[box['start_y']:box['end_y'], box['start_x']:box['end_x']]
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        face = cv2.resize(face, (224, 224))
        face = (np.float32(face) - 127.5) / 128
        face = np.expand_dims(face, axis=0)
        return face

    # return 'mask' and 'no-mask' scores
    def masked_or_not(self, frame):
        input_data = self.preprocess(frame)

        # check for no face
        if input_data is None:
            print("no face detected...")
            return None

        t0 = time.time()
        self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
        self.interpreter.invoke()
        output_data = self.interpreter.get_tensor(self.output_details[0]['index'])
        print('Mask detection time: {:.3f} seconds'.format(time.time() - t0))

        res = {'mask': float(output_data[0][0]), 'no-mask': float(output_data[0][1])}
        return res
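# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of driving MaskDetector end to end. The model filenames
# reuse the paths from the Flask app further below; 'sample.jpg' is a
# hypothetical input image.
if __name__ == '__main__':
    detector = MaskDetector('models/mask_detector.tflite', 'models/version-RFB-640.onnx')
    frame = cv2.imread('sample.jpg')
    result = detector.masked_or_not(frame)
    if result is not None:
        print('mask: {:.3f}, no-mask: {:.3f}'.format(result['mask'], result['no-mask']))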
from FaceDetection import FaceDetector
import dlib
import cv2
import numpy as np
import os
import pickle

image_path = "/home/palnak/PycharmProjects/ExpRec/temp1.png"
landmark_data = "params/shape_predictor_68_face_landmarks.dat"
face_recognition_model = "params/dlib_face_recognition_resnet_model_v1.dat"

faces = FaceDetector()
detector = dlib.get_frontal_face_detector()

final_prediction = []
actual_prediction = []
prediction_list = []
average_prediction_dictionary = {}
actual_probability = []
number_of_count = {}
path_to_svm = "params/svm.pkl"


def _rect_to_css(face, frame, prediction):
    """Draw the dlib rectangle and the prediction label on the frame.

    To crop and view/save the detected face instead:
        face_image = frame[face.top():face.bottom(), face.left():face.right()]
        pil_image = Image.fromarray(face_image)
        pil_image.show()
        pil_image.save("face.png")
    """
    x = face.left()
    y = face.top()
    w = face.right() - x
    h = face.bottom() - y
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(frame, str(prediction), (x - 10, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
def make_set(known_faces, detector, datafile_train, datafile_test, face_encoder):
    faces = FaceDetector("params/dlib_face_recognition_resnet_model_v1.dat",
                         "params/shape_predictor_68_face_landmarks.dat")
    training_data = []
    celebrity_data = []
    celebrity_labels = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    print(known_faces)

    for known_face in known_faces:
        training, prediction = get_files(known_face)

        for item in training:
            print(item)
            image = scipy.misc.imread(item)  # open image
            image, dlib_face = faces.detect(image, detector)
            success, landmark_points = faces.face_geometry(dlib_face, image)
            success, face_encoding_points = face_encoding(
                landmark_points, image, face_encoder)
            if success:
                training_data.append(face_encoding_points.flatten())
                celebrity_data.append(face_encoding_points.flatten())
                celebrity_labels.append(item)
                training_labels.append(known_face)
            else:
                print("ERROR 404!!")

        print("Creating validation set...")
        f = open(datafile_train, 'wb')
        pickle.dump(training_data, f)
        pickle.dump(training_labels, f)
        f.close()

        for item in prediction:
            print(item)
            image = scipy.misc.imread(item)  # open image
            image, dlib_face = faces.detect(image, detector)
            success, landmark_points = faces.face_geometry(dlib_face, image)
            # if len(landmark_points) < 1:
            #     continue
            success, face_encoding_points = face_encoding(
                landmark_points, image, face_encoder)
            if success:
                prediction_data.append(face_encoding_points.flatten())
                prediction_labels.append(known_face)
                celebrity_data.append(face_encoding_points.flatten())
                celebrity_labels.append(item)
            else:
                print("ERROR 404!!!")

        f = open(datafile_test, 'wb')
        pickle.dump(prediction_data, f)
        pickle.dump(prediction_labels, f)
        f.close()

        create_celeb_classifier(celebrity_data, celebrity_labels, known_face)
        celebrity_data = []
        celebrity_labels = []

    return training_data, training_labels, prediction_data, prediction_labels
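# --- Sketch of a helper assumed above (not part of the original file) ---
# get_files() is called in make_set() but not defined here. A minimal version,
# assuming each known face has its own directory of images under 'dataset/'
# and that roughly 80% of its files go to training and the rest to prediction:
import glob
import os
import random


def get_files(known_face, base_dir='dataset'):
    files = glob.glob(os.path.join(base_dir, known_face, '*'))
    random.shuffle(files)
    split = int(len(files) * 0.8)
    return files[:split], files[split:]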
'''
File that tests the basic functionality of the algorithm
'''
from FaceDetection import FaceDetector
import sys
from time import sleep

filename = "./singles/Adrian_Murrell_0001.pgm"
detector = FaceDetector()

print("Edge detection off")
detector.updateSettings(detectEdges=False, mode='data')
d, closest = detector.detectFace(filename)
d, closest = detector.detectFace(filename)

print("\nTraining for another library")
print("The library should be reconstructed")
detector.updateSettings(trainDataPath="./singles")
detector.trainModel()

print("\nTest the algorithm with several detections")
print("The library should not be reconstructed")
d, closest = detector.detectFace(filename)
d, closest = detector.detectFace(filename)

print("\nTurning on no-data mode, setting back the main library")
detector.updateSettings(mode='no-data', detectEdges=False, trainDataPath='./library')
print("Predicting...\nThe model should be reconstructed")
from FaceDetection import FaceDetector
import ProgressBar
import os

file_names = os.listdir('./Test')
detector = FaceDetector()
detector.updateSettings(mode='no-data', showSteps=False)

old_faces = 0
new_faces = 0
not_faces = 0

print("Testing old version")
ProgressBar.initializeProgressBar(len(file_names))
detector.updateSettings(detectEdges=False)
for image_path in file_names:
    ProgressBar.increaseProgressBar()
    result = detector.detectFace("./Test/" + image_path)
    if result == 0:
        old_faces += 1
    elif result == 1:
        new_faces += 1
    else:
        not_faces += 1
ProgressBar.completeProgressBar()

iter_1_old_faces = old_faces
iter_1_new_faces = new_faces
iter_1_not_faces = not_faces

print("Testing newer version")
ProgressBar.initializeProgressBar(len(file_names))
from FaceDetection import FaceDetector

detector = FaceDetector()
detector.updateSettings(
    detectEdges=False,
    saveData=True,
    showSteps=True,
    mode='no-data',
    singlesDataPath="./singles",
    duplicatesDataPath="./duplicates",
    trainDataPath="./library"
)
detector.detectFace("./Test/Robert_De_Niro_0002.pgm")
import logging
import warnings

from flask import Flask, render_template, request

from FaceDetection import FaceDetector
# module name assumed here; MaskDetector is the class defined above
from MaskDetection import MaskDetector

warnings.filterwarnings("ignore")

app = Flask(__name__, static_url_path="/static")
logger = logging.getLogger(__name__)

args = {
    'faceDetection_model': '/home/tigerit/PycharmProjects/MaskedOrNot/models/version-RFB-640.onnx',
    'maskDetection_model': '/home/tigerit/PycharmProjects/MaskedOrNot/models/mask_detector.tflite'
}
faceDetector = FaceDetector(args['faceDetection_model'])
maskDetector = MaskDetector(args['maskDetection_model'], args['faceDetection_model'])


@app.route('/', methods=['GET'])
def home():
    return render_template('main.html')


@app.route('/detect_faces', methods=['POST'])
def detect():
    photo = request.get_data()
    # by specifying rank, the desired number of detected faces can be returned; all faces are returned if rank is not provided
    rank = request.args.get('rank', default='-1').lower()
    try: