import os

import cv2

# Project modules assumed to be defined elsewhere in this project:
# OpenCVHelper, DataSetGenerator, Predictor, eye_p (eye-blink detector), path.


class VerifyUser:
    def __init__(self):
        self.helper = OpenCVHelper()
        self.dg = DataSetGenerator(os.path.join(path, "training_face_rec"))
        max_labels = len(self.dg.data_labels)
        self.predictor = Predictor(self.dg, num_labels=max_labels)

    def get_results(self, file_in):
        if not os.path.isfile(file_in):
            print("Captured file not found!")
            return None, "Captured file not found!"

        # ------------------- Load and process the video file -------------------
        # Liveness check: reject the clip if no eye blink was detected.
        num_blinks = eye_p.get_num_blinks(file_in)
        print("Num blinks: %d" % num_blinks)
        if num_blinks <= 0:
            return None, "No eye blink found!"

        cap = cv2.VideoCapture(file_in)
        results = []
        while True:
            ret, image = cap.read()
            if not ret:
                break
            img_face, pos_face = self.helper.convert_img(image)
            if img_face is not None:
                cv2.rectangle(image, (pos_face[0], pos_face[2]),
                              (pos_face[1], pos_face[3]), (255, 0, 0), 5)
                user, pred = self.predictor.classify_image(img_face)
                results.append(pred)
        cap.release()

        if results:
            # Average the predicted label indices over all frames with a face
            # and map the rounded result back to a label name.
            r = int(abs(sum(results) / float(len(results))))
            return self.dg.data_labels[r], True
        return None, "No face found!"
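A short usage sketch of get_results follows. The clip path "captured_clip.avi" is a placeholder, and the project modules referenced by VerifyUser are assumed to be importable; this is not part of the original listing.

# Minimal usage sketch (hypothetical file name).
if __name__ == "__main__":
    verifier = VerifyUser()
    label, status = verifier.get_results("captured_clip.avi")
    if status is True:
        print("Verified user: %s" % label)
    else:
        print("Verification failed: %s" % status)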
# Live preview loop: classify each frame, smooth the per-frame decision over a
# five-frame window (st_verify), and draw the latest result on the preview.
while True:
    ret, image = cap.read()
    if not ret:
        break
    img_face, pos_face = helper.convert_img(image)
    # img_face = helper.convert_to_greyscale(image)  # greyscale variant (unused)
    if img_face is not None:
        cv2.rectangle(image, (pos_face[0], pos_face[2]),
                      (pos_face[1], pos_face[3]), (255, 0, 0), 5)
        f_img_verify = p.classify_image(img_face)
        print(f_img_verify)
        # Shift the five-frame decision window and append the latest result.
        for i in range(4):
            st_verify[i] = st_verify[i + 1]
        st_verify[4] = f_img_verify
        # Only flip the overall flag when all five recent frames agree.
        if st_verify == [True, True, True, True, True]:
            f_verify = True
        elif st_verify == [False, False, False, False, False]:
            f_verify = False
        cv2.putText(image, str(f_img_verify), (50, 50),
                    cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)
    cv2.imshow("Faces found", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
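The loop above expects helper, p, cap, st_verify, and f_verify to already exist. A sketch of that setup (to run before the loop) and the cleanup (after it) is shown below; the webcam index, Predictor construction, and variable names are assumptions mirroring the VerifyUser class rather than the project's exact wiring.

# Hypothetical setup for the preview loop (assumed names and arguments).
helper = OpenCVHelper()
dg = DataSetGenerator(os.path.join(path, "training_face_rec"))
p = Predictor(dg, num_labels=len(dg.data_labels))
cap = cv2.VideoCapture(0)      # 0 = default webcam
st_verify = [False] * 5        # five-frame decision window
f_verify = False               # overall verification flag

# ... run the preview loop here ...

# Cleanup once the loop exits.
cap.release()
cv2.destroyAllWindows()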