def detectFaces(FrameQueue, RectQueue):
    print('module name:', __name__)
    print('parent process:', os.getppid())
    print('process id:', os.getpid())
    # Start the face detection (blocks until detection stops)
    FaceDetect.StartDetection(FrameQueue, RectQueue)
    print("Terminated")
    sys.exit()
def SelectImage(self):
    try:
        image = QtWidgets.QFileDialog.getOpenFileName(self, "Select image", '')[0]
        FaceDetect.Detect_Faces(image)
        image = QtGui.QPixmap("image.png")
        self.label.setPixmap(image)
        os.remove("image.png")
    except Exception as e:
        print(e)
def detectFaces(CameraQueue, FrameQueue, RectQueue, FacePointQueue, SpeakerQueue):
    # print('module name:', __name__)
    # print('parent process:', os.getppid())
    # print('process id:', os.getpid())
    # Start the face detection (blocks until detection stops)
    FaceDetect.StartDetection(CameraQueue, FrameQueue, RectQueue,
                              FacePointQueue, SpeakerQueue)
    print("Terminated")
    sys.exit()
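# A minimal sketch of launching a worker like the ones above in its own
# process (standard multiprocessing API; what flows through the queues
# depends on FaceDetect.StartDetection):
from multiprocessing import Process, Queue

if __name__ == '__main__':
    CameraQueue, FrameQueue, RectQueue = Queue(), Queue(), Queue()
    FacePointQueue, SpeakerQueue = Queue(), Queue()
    p = Process(target=detectFaces,
                args=(CameraQueue, FrameQueue, RectQueue,
                      FacePointQueue, SpeakerQueue))
    p.start()
    p.join()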
def init_vision(self):
    # if self.video is None or not self.video.isOpened():
    #     if self.use_pipe:
    #         self.video = open_camera(self.video, None, "outpipe")
    #     else:
    #         self.video = open_camera(self.video)
    self.face_detector = FaceDetect.FaceDetector()
    self.face_detector.select_trained_cascade_model(
        "library/vision/Vision/cascade_models/"
        "haarcascade_frontalface_alt2.xml")
    self.face_recognizer = FaceRecognizer.FaceRecognizer(
        'library/vision/face_recognition/trained_face_model.pkl')
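# For reference, a minimal OpenCV-only sketch of loading and running the same
# Haar cascade directly (standard cv2 API; FaceDetect.FaceDetector is the
# project's own wrapper, and "frame.jpg" is a hypothetical input):
import cv2

cascade = cv2.CascadeClassifier(
    "library/vision/Vision/cascade_models/haarcascade_frontalface_alt2.xml")
gray = cv2.cvtColor(cv2.imread("frame.jpg"), cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)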
def getimage(num=1, sex=0):
    imagepath = []
    local_url = ""
    while True:
        if sex == 0:
            local_url = urlman
        else:
            local_url = url
        page = random.randrange(1, 2062)
        resp = requests.get(local_url + f"page={page}", headers=header)
        soup = BeautifulSoup(resp.content, "html.parser")
        imagepathlist = [p['src'] for p in soup.find_all('img')
                         if p.get("src") is not None]
        imagepath = imagepathlist[random.randrange(0, len(imagepathlist))]
        with requests.get(imagepath, stream=True) as rq:
            rq.raise_for_status()  # check response status
            with open("dataset/" + imagepath.split("/")[-1], "wb") as rw:
                for chunk in rq.iter_content(chunk_size=10000):
                    if chunk:
                        rw.write(chunk)
        # image = face_recognition.load_image_file(
        #     "dataset/" + imagepath.split("/")[-1])
        image, face_locations = FaceDetect.getFaceRect(
            "dataset/" + imagepath.split("/")[-1])
        # Keep only images that contain exactly one face
        if len(face_locations) != 1:
            os.remove("dataset/" + imagepath.split("/")[-1])
            continue
        face_locations = FaceDetect.resizeRect(face_locations)
        top, right, bottom, left = face_locations[0]
        image = image[top:bottom, left:right]
        img = Image.fromarray(image)
        img.save("dataset/" + imagepath.split("/")[-1])
        print(f"Save image {imagepath}")
        break
    return "dataset/" + imagepath.split("/")[-1], imagepath.split("/")[-1]
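# Hypothetical usage sketch (not from the original script): download a few
# single-face crops into dataset/; assumes url/urlman/header are defined as
# module globals, as in the snippet above.
if __name__ == "__main__":
    for _ in range(5):
        local_path, filename = getimage(sex=0)
        print(local_path, filename)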
def process_image_message(event):
    result_message_array = []
    response = requests.get(
        f"https://api.line.me/v2/bot/message/{event.message.id}/content",
        stream=True,
        headers={'Authorization': f'Bearer {secretFileContentJson.get("channel_access_token")}'})
    img = Image.open(response.raw)
    filepath = f"predection/{event.message.id}.{img.format.lower()}"
    img.save(filepath)
    # filepath = "predection/308992.jpg"
    img, rects = FD.getFaceRect(filepath)
    copyrect = [i for i in rects]
    Img = Image.fromarray(img)
    imgdrw = ImageDraw.Draw(Img)
    # Draw a box around every detected face
    for i in range(0, len(rects)):
        top, right, bottom, left = rects[i]
        imgdrw.rectangle([(left, top), (right, bottom)],
                         outline=(255, 0, 0), width=10)
    rectResize = FD.resizeRect(rects)
    score = []
    scorestring = ""
    font = ImageFont.truetype('arial.ttf', 60)
    # Score each face crop and annotate the image with its score
    for i in range(len(rectResize)):
        top, right, bottom, left = rectResize[i]
        resizeimage = img[top:bottom, left:right]
        nowscore = round(FSA.prediction(resizeimage), 3)
        scorestring += str(nowscore) + "\n"
        top, right, bottom, left = copyrect[i]
        imgdrw.text((left, top - 70), str(nowscore), (255, 0, 0), font=font)
        score.append(nowscore)
    Img.save(filepath)
    result_message_array.append(uploadImage(filepath.split('/')[-1], filepath))
    result_message_array.append(TextSendMessage(text=scorestring))
    line_bot_api.reply_message(
        event.reply_token,
        result_message_array
    )
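# A sketch of how this handler might be registered with the LINE bot SDK
# (standard line-bot-sdk v1 decorator; `handler` is assumed to be the
# WebhookHandler already used by this bot):
from linebot.models import MessageEvent, ImageMessage

@handler.add(MessageEvent, message=ImageMessage)
def handle_image(event):
    process_image_message(event)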
def predict_knn(x_img_path, k, knn_clf=None, model_path=None,
                distance_threshold=0.5):
    global recog_count, start, end
    recog_count = 0
    confidence_scores = []
    if (not os.path.isfile(x_img_path)
            or os.path.splitext(x_img_path)[1][1:] not in ALLOWED_EXTENSIONS):
        raise Exception("Invalid image path: {}".format(x_img_path))
    if knn_clf is None and model_path is None:
        raise Exception("Must supply a KNN classifier either through knn_clf or model_path")
    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    # Load the image file and find face locations
    image = fr.load_image_file(x_img_path)
    face_locations = FaceDetect.get_new_locations()
    # If no faces are found in the image, return empty results
    if len(face_locations) == 0:
        return [], []
    start = time.time()
    # Find encodings for faces in the test image
    faces_encodings = fr.face_encodings(image, known_face_locations=face_locations)
    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=k)
    are_matches = [closest_distances[0][i][0] <= distance_threshold
                   for i in range(len(face_locations))]
    name_prediction = knn_clf.predict(faces_encodings)
    # Predict classes and mark faces beyond the distance threshold as "unknown"
    predictions = [(pred, loc) if rec else ("unknown", loc)
                   for pred, loc, rec in zip(name_prediction, face_locations, are_matches)]
    end = time.time()
    # Count recognized persons
    recog_count = np.sum(are_matches)
    # Calculate the confidence scores
    confidence_scores = calculate_confidence_score_knn(
        face_locations, closest_distances, distance_threshold, name_prediction, k)
    return predictions, confidence_scores
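# A minimal usage sketch; "test.jpg" and "trained_knn_model.clf" are
# hypothetical paths, and k=3 is an arbitrary choice:
predictions, confidences = predict_knn(
    "test.jpg", k=3, model_path="trained_knn_model.clf")
for (name, (top, right, bottom, left)), conf in zip(predictions, confidences):
    print(f"{name} at ({left}, {top}): confidence {conf}")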
def reset(self):
    self._product = fd.FaceDetect()
def display_number_of_faces_detected():
    number_of_faces_detected = str(FaceDetect.get_number_of_faces_detected())
    l_faces_detected.setText(number_of_faces_detected)
    l_faces_detected.repaint()
def display_detection_time():
    detection_time = str(round(FaceDetect.get_detection_time(), 5))
    l_face_detection_time.setText(detection_time)
    l_face_detection_time.repaint()
def pb_detect_faces_clicked():
    # Run the selected detection method
    detection_method = cb_face_detection_method.currentText()
    if detection_method == "Dlib Hog":
        FaceDetect.dlib_hog(upload_image_path, 1)
        detect_image = FaceDetect.draw_boxes1()
    elif detection_method == "Dlib CNN":
        FaceDetect.dlib_cnn(upload_image_path, 1)
        detect_image = FaceDetect.draw_boxes1()
    elif detection_method == "OpenCV Haar default.xml":
        FaceDetect.opencv_haar(upload_image_path)
        detect_image = FaceDetect.draw_boxes2()
    elif detection_method == "OpenCV CNN tensorflow":
        FaceDetect.opencv_tensorflow(upload_image_path)
        detect_image = FaceDetect.draw_boxes3()
    elif detection_method == "OpenCV CNN caffe":
        FaceDetect.opencv_caffe(upload_image_path)
        detect_image = FaceDetect.draw_boxes3()
    elif detection_method == "Dlib Hog2":
        FaceDetect.dlib_hog(upload_image_path, 2)
        detect_image = FaceDetect.draw_boxes1()
    elif detection_method == "Dlib CNN2":
        FaceDetect.dlib_cnn(upload_image_path, 2)
        detect_image = FaceDetect.draw_boxes1()

    # Display the image with detected faces
    detect_image.save("temp files/temp.jpg")
    display_image_faces("temp files/temp.jpg")

    # Display results
    reset_results()
    display_detection_time()
    display_number_of_faces_detected()

    # Enable recognizing faces
    pb_recognize_faces.setDisabled(False)
    pb_recognize_faces.repaint()
expected = [43, 44]
poi = (0, 0)
poi_hist = (0, 0)
loop = 0
flag = 0
pyautogui.moveTo(960, 540)  # center point for a 1920x1080 screen

while True:
    # Capture the video frame-by-frame
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    # faces = FaceDetect.facedetect(gray)
    faces = FaceDetect.facedetect(frame)
    print("Faces detected: {}".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        temp = frame[y:y + h, x:x + w]
        cropface = temp.copy()
        cropface, roi_eye, flag = EyeDetect.eyedetect(cropface, flag)

        # Dlib rectangle
        dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
        detected_landmarks = predictor(frame, dlib_rect).parts()
        landmarks = np.matrix([[p.x, p.y] for p in detected_landmarks])

        # Copy the frame so we can add features for a better view
        image = frame.copy()
test6_img = cv2.imread("ExtendedYaleB/s4/yaleB19_P00A-020E-40.pgm", 1)
test7_img = cv2.imread("ExtendedYaleB/s4/yaleB13_P00A-005E+10.pgm", 1)

face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read("recognizers/reco_4.xml")

# Make a copy of each image as we don't want to change the originals
img = test_img.copy()
img2 = test2_img.copy()
img3 = test3_img.copy()
img4 = test4_img.copy()
img5 = test5_img.copy()
img6 = test6_img.copy()
img7 = test7_img.copy()

# Detect a face in each image
face = fd.FaceDetect(img)
face2 = fd.FaceDetect(img2)
face3 = fd.FaceDetect(img3)
face4 = fd.FaceDetect(img4)
face5 = fd.FaceDetect(img5)
face6 = fd.FaceDetect(img6)
face7 = fd.FaceDetect(img7)

# Predict the images using our face recognizer
print("image from base")
label2 = face_recognizer.predict(face2)
print("this is the prediction value for yaleB11")
print(label2)
label3 = face_recognizer.predict(face3)
print("this is the prediction value for yaleB13")
import cv2
import numpy as np
import FaceDetect as fd

faces_loaded, labels_loaded = fd.BaseLoad("test_s4.csv")
faces = []
labels = []
for x in range(len(faces_loaded)):
    # Detect once and reuse the result instead of re-running detection
    face = fd.FaceDetect(faces_loaded[x])
    if face is not None:
        print(type(face))
        faces.append(face)
        labels.append(int(labels_loaded[x], base=15))

face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(labels))
face_recognizer.write("reco_4.xml")
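# A minimal sketch of using the model written above for prediction (standard
# OpenCV LBPH API; the test image path is taken from the evaluation snippet
# earlier in this section):
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("reco_4.xml")
face = fd.FaceDetect(cv2.imread("ExtendedYaleB/s4/yaleB13_P00A-005E+10.pgm", 1))
if face is not None:
    label, confidence = recognizer.predict(face)
    print(label, confidence)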