import cv2
import numpy as np
from sklearn.metrics.pairwise import cosine_distances, cosine_similarity

# Module-level objects assumed to be created elsewhere in the project:
# detector (e.g. an MTCNN face detector), modelo (the CNN embedding model),
# nca (a fitted NCA transform), neigh (a fitted k-NN index), rec (an object
# exposing validaFace), plus the helpers alinhaFace, normalizaFace,
# cadastrarPessoa and selectFaceDB.


def testeValidaFace(imagem):
    """Detects a face, extracts its embedding and registers it as a new person."""
    detections = detector.detect_faces(imagem)
    for detection in detections:
        score = detection["confidence"]
        if score >= 0.95:
            keypoints = detection["keypoints"]
            x, y, w, h = detection["box"]
            # Normalize and align the face before cropping.
            new_image = alinhaFace(normalizaFace(imagem), keypoints)
            # Crop the full box height; the horizontal slice is centred on the box
            # and spans 2h - w pixels.
            detected_face = new_image[y:y + h, x + w - h:x + h]
            img0 = cv2.resize(detected_face, (100, 100), interpolation=cv2.INTER_AREA)
            img = np.asarray(img0, dtype=np.float32) / 255.0
            img = np.expand_dims(img, 0)
            # Embedding: CNN prediction projected by the fitted NCA transform.
            feature = nca.transform(modelo.predict(img))
            try:
                features = np.load("Modelos/features.npy")
            except OSError:  # no feature file yet: this is the first registration
                features = []
            # The new person gets the next sequential id; the remaining
            # registration fields are filled with the id as placeholders.
            id = len(features) + 1
            cadastrarPessoa(img0, feature, id, id, id, id, id, id, id)
        else:
            print("face not recognized")
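
# Usage sketch: registers the person found in a photo on disk. The file name is a
# hypothetical example; detector, modelo, nca and cadastrarPessoa must already be
# available as described above.
def exemploCadastroPorFoto():
    foto = cv2.imread("fotos/nova_pessoa.jpg", cv2.IMREAD_COLOR)  # hypothetical path
    if foto is not None:
        testeValidaFace(foto)
    else:
        print("example image could not be read")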

def ReconheceFaceVideo(video_path, rec, detector):
    """Runs face detection and recognition on every frame of a video source."""
    cap = cv2.VideoCapture(video_path)
    while True:
        conectado, frame = cap.read()
        if not conectado:
            break
        detections = detector.detect_faces(frame)
        for detection in detections:
            score = detection["confidence"]
            if score >= 0.95:
                try:
                    keypoints = detection["keypoints"]
                    x, y, w, h = detection["box"]
                    new_image = alinhaFace(normalizaFace(frame), keypoints)
                    # Crop the full box height; the horizontal slice is centred on
                    # the box and spans 2h - w pixels.
                    detected_face = new_image[y:y + h, x + w - h:x + h]
                    img0 = cv2.resize(detected_face, (100, 100),
                                      interpolation=cv2.INTER_AREA)
                except Exception:
                    print("error while normalizing the image")
                else:
                    img = np.asarray(img0, dtype=np.float32) / 255.0
                    img = np.expand_dims(img, 0)
                    pess = rec.validaFace(img)
                    if pess != -1:  # validaFace returns -1 when no registered face matches
                        (id, path, nome, tel, per, atv, ide2,
                         log, bai, uf, cep) = selectFaceDB(int(pess))[0]
                        print("Id:{0} \nNome:{1} \nTelefone:{2} \nPermissao:{3}\n"
                              "Logradouro:{4} \nBairro:{5} \nUF:{6} \nCEP:{7}".format(
                                  str(id), str(nome), str(tel), str(per),
                                  str(log), str(bai), str(uf), str(cep)))
                        text = "Id:{0} Nome:{1}".format(str(id), str(nome))
                    else:
                        text = "face not recognized"
                    # Draw the detection box and label, then show the frame.
                    frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 50, 50), 2)
                    cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.3, (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.imshow("oi", frame)
                    cv2.waitKey(1)
            else:
                print("face not detected")
    cap.release()
    cv2.destroyAllWindows()
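
# Usage sketch: drives ReconheceFaceVideo from the default webcam (index 0) instead
# of a video file; reconhecedor and mtcnn_detector are hypothetical names for an
# already constructed recognizer (exposing validaFace) and face detector.
def exemploReconhecimentoWebcam(reconhecedor, mtcnn_detector):
    # cv2.VideoCapture accepts a device index as well as a file path.
    ReconheceFaceVideo(0, reconhecedor, mtcnn_detector)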

def validaFace(imagem):
    """Returns the database id of the closest registered face, or -1 if none is found."""
    detections = detector.detect_faces(imagem)
    for detection in detections:
        score = detection["confidence"]
        if score >= 0.99:
            keypoints = detection["keypoints"]
            x, y, w, h = detection["box"]
            new_image = alinhaFace(normalizaFace(imagem), keypoints)
            detected_face = new_image[y:y + h, x + w - h:x + h]
            img0 = cv2.resize(detected_face, (100, 100), interpolation=cv2.INTER_AREA)
            imgc = np.asarray(img0, dtype=np.float32) / 255.0
            imgc = np.expand_dims(imgc, 0)
            # Embedding of the probe face.
            feature = nca.transform(modelo.predict(imgc))
            # Retrieve the two nearest registered faces from the k-NN index.
            dis, posi = neigh.kneighbors(feature, 2, return_distance=True)
            id1 = selectFaceDB(int(posi[0][0] + 1))[0]
            id2 = selectFaceDB(int(posi[0][1] + 1))[0]
            # Recompute the embeddings of both candidates from their stored images.
            img1 = cv2.imread(id1[1], cv2.IMREAD_COLOR)
            imgc = np.asarray(img1, dtype=np.float32) / 255.0
            imgc1 = np.expand_dims(imgc, 0)
            feature1 = nca.transform(modelo.predict(imgc1))
            img2 = cv2.imread(id2[1], cv2.IMREAD_COLOR)
            imgc = np.asarray(img2, dtype=np.float32) / 255.0
            imgc1 = np.expand_dims(imgc, 0)
            feature2 = nca.transform(modelo.predict(imgc1))
            # Keep the candidate whose embedding is most similar to the probe
            # (highest cosine similarity / lowest cosine distance).
            cosi1 = cosine_distances(feature, feature1)
            simi1 = cosine_similarity(feature, feature1)
            cosi2 = cosine_distances(feature, feature2)
            simi2 = cosine_similarity(feature, feature2)
            if simi1[0][0] >= simi2[0][0] and cosi1[0][0] <= cosi2[0][0]:
                id = id1[0]
            else:
                id = id2[0]
            return id
        else:
            return -1
    return -1
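
# Side note on the decision rule in validaFace: scikit-learn defines cosine_distances
# as 1 - cosine_similarity, so "higher similarity" and "lower distance" are the same
# test. A minimal sketch with synthetic vectors (not project data):
def exemploCossenoRedundante():
    a = np.array([[1.0, 0.0, 0.0]])
    b = np.array([[0.6, 0.8, 0.0]])
    sim = cosine_similarity(a, b)[0][0]    # approximately 0.6
    dist = cosine_distances(a, b)[0][0]    # approximately 0.4
    print(sim, dist, np.isclose(dist, 1.0 - sim))  # True: the two criteria agree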

def ReconheceFaceImagem(image_path):
    """Detects and recognizes faces in a single image file."""
    try:
        imagem = cv2.imread(image_path, cv2.IMREAD_COLOR)
        detections = detector.detect_faces(imagem)
    except Exception:
        print("image not recognized")
    else:
        for detection in detections:
            score = detection["confidence"]
            if score >= 0.95:
                keypoints = detection["keypoints"]
                x, y, w, h = detection["box"]
                new_image = alinhaFace(normalizaFace(imagem), keypoints)
                detected_face = new_image[y:y + h, x + w - h:x + h]
                img0 = cv2.resize(detected_face, (100, 100), interpolation=cv2.INTER_AREA)
                img = np.asarray(img0, dtype=np.float32) / 255.0
                img = np.expand_dims(img, 0)
                pess = rec.validaFace(img)
                if pess != -1:  # validaFace returns -1 when no registered face matches
                    (id, path, nome, tel, per, atv, ide2,
                     log, bai, uf, cep) = selectFaceDB(int(pess))[0]
                    print("Id:{0} \nNome:{1} \nTelefone:{2} \nPermissao:{3}\n"
                          "Logradouro:{4} \nBairro:{5} \nUF:{6} \nCEP:{7}".format(
                              str(id), str(nome), str(tel), str(per),
                              str(log), str(bai), str(uf), str(cep)))
                    text = "Id:{0} Nome:{1}".format(str(id), str(nome))
                else:
                    text = "face not recognized"
                # Draw the detection box and label once, for both outcomes.
                cv2.rectangle(imagem, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.putText(imagem, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.6, (0, 255, 0), 2, cv2.LINE_AA)
                cv2.imshow("oi", imagem)
                cv2.waitKey(0)
            else:
                print("face not detected")
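
# Usage sketch: runs ReconheceFaceImagem over every image in a folder. The folder
# name is a hypothetical example; each call blocks until a key is pressed because
# of the cv2.waitKey(0) inside ReconheceFaceImagem.
def exemploReconhecimentoPasta(pasta="fotos"):
    import os
    for nome_arquivo in sorted(os.listdir(pasta)):
        if nome_arquivo.lower().endswith((".jpg", ".jpeg", ".png")):
            ReconheceFaceImagem(os.path.join(pasta, nome_arquivo))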