Esempio n. 1
0
def display_list_recognized_faces():
    """Refresh the recognized-faces list widget with the latest results."""
    list_recognized_faces.clear()
    for face_name in FaceRecog.get_recognized_faces():
        list_recognized_faces.addItem(face_name)
    # Force an immediate repaint so the new entries show up right away.
    repaint_list(list_recognized_faces)
Esempio n. 2
0
def faceIdentification():
    """Try to identify a face from the camera, up to 3 attempts.

    On a successful match, greets the person by name via `speaker` and
    stops. After the third failed attempt, saves the last captured frame
    to 'stranger.jpg'. Frames are shown in an OpenCV window between
    attempts; the window is destroyed on exit.
    """
    #speaker("Start face identification system")
    face_recog = FaceRecog.FaceRecognition()
    print(face_recog.known_face_names)
    name = "Unknown"

    for i in range(3):
        print(i)
        name, frame = face_recog.get_frame()
        print("took photo")
        if name != "Unknown":

            greeting = "Hello " + name
            speaker(greeting)
            break

        # take photo interval = 3 sec
        time.sleep(3)

        # show the frame (waitKey is required for imshow to render;
        # the returned key code was unused, so it is not kept)
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
        print(name)

        if i == 2:
            #speaker("Get out!")
            print("Get out!")
            # Fix: the original passed IMWRITE_PNG_COMPRESSION for a .jpg
            # file, which OpenCV silently ignores. Use the JPEG quality
            # flag instead (95 is OpenCV's default quality).
            cv2.imwrite('stranger.jpg',
                        frame,
                        params=[cv2.IMWRITE_JPEG_QUALITY, 95])

            # +++++++++++MESSAGE++++++++++++++

    # do a bit of cleanup
    cv2.destroyAllWindows()
Esempio n. 3
0
def pb_recognize_faces_clicked():
    """Run the KNN recognition method selected in the combo box and
    display the annotated image plus timing/count/name results."""
    # run selected recognition method
    recognition_method = cb_recognition_method.currentText()
    # Fix: the original read only the first character (int(text[:1])),
    # which breaks for any neighbor count >= 10. Parse the full run of
    # leading digits instead; behavior is unchanged for single digits.
    import re
    number_of_neighbors = int(re.match(r'\d+', recognition_method).group(0))
    recognize_image = FaceRecog.run_knn(upload_image_path, number_of_neighbors)

    # display image with detected objects
    recognize_image.save("temp files/tempRecog.jpg")
    display_image_faces("temp files/tempRecog.jpg")

    # display results
    display_recognition_time()
    display_number_of_faces_recognized()
    display_list_recognized_faces()
Esempio n. 4
0
def display_recognition_time():
    """Show the last recognition time (seconds, 5 decimals) in the UI label."""
    elapsed = round(FaceRecog.get_recognition_time(), 5)
    l_face_recognition_time.setText(str(elapsed))
    l_face_recognition_time.repaint()
Esempio n. 5
0
def display_number_of_faces_recognized():
    """Show the count of recognized faces in the UI label."""
    face_count = FaceRecog.get_number_of_faces_recognized()
    l_faces_recognized.setText(str(face_count))
    l_faces_recognized.repaint()
Esempio n. 6
0
import CascadeDetector
import FrameGenerator
from PIL import Image
from imutils.object_detection import non_max_suppression
from imutils import paths
import imutils
import numpy as np
import os, sys, time
#import ty
import json

print("Hello")
# Cascade identifier and training-image directory used by the recognizer.
fn_haar = 'FACE'
fn_dir = 'database'

# Build and train the LBPH face recognizer once at module load.
rec = FaceRecog.FaceRecog(recogtype="OD_LBPH_FACE")
rec.initTrainer(location=fn_dir)
# NOTE(review): 'lables' is a typo for 'labels' and 'id' shadows the
# builtin; left unchanged because code outside this excerpt may reference
# these module-level names. The placeholder tuple below is immediately
# overwritten by rec.train().
(image, lables, names, id) = ([], [], {}, 0)
print("Training ......")
image, lables, names, id = rec.train()

# HOG descriptor with OpenCV's default people (pedestrian) detector.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Flask application object for the recognition endpoints below.
app = Flask(__name__)


def pydetect_vid(name):
    # NOTE(review): this definition appears truncated in this excerpt —
    # only its initial constants are visible, so nothing beyond them is
    # documented here.
    size = 4
    fn_haar = 'trained/haarcascade_frontalface_default.xml'
    fn_dir = 'database'
Esempio n. 7
0
def frecog():
    """Flask endpoint body: decode a base64 image from the request JSON,
    run LBPH face recognition on it, and return the found names as JSON.

    Expects a JSON body with key 'ImageBLOB' holding base64 image data.
    Returns jsonify({"name": "<space-separated names> Detected"}).
    """
    content = request.get_json()
    img_data = content['ImageBLOB']
    imgdata = base64.b64decode(img_data)
    filename = 'image2.png'
    with open(filename, 'wb') as f:
        f.write(imgdata)
    # Reload through OpenCV and mirror horizontally (webcam-style flip).
    im = cv2.imread("image2.png")
    im = cv2.flip(im, 1, 0)
    fn_haar = 'FACE'
    fn_dir = 'database'

    # NOTE(review): the recognizer is retrained on every request, which is
    # expensive; consider reusing a model trained once at module load.
    rec = FaceRecog.FaceRecog(recogtype="OD_LBPH_FACE")
    rec.initTrainer(location=fn_dir)
    print("Training ......")
    # Locals renamed: 'id' shadowed the builtin and 'lables' was a typo;
    # these are function-local, so callers are unaffected.
    images, labels, names, label_id = rec.train()

    # Face-crop size passed to the recognizer — assumed to be the size it
    # was trained with; TODO confirm against FaceRecog.
    (im_width, im_height) = (224, 184)

    det = CascadeDetector.CascadeDetector()
    det.setTrainedDataId(fn_haar)
    det.setTrainedDataLocation()
    det.initDetector()
    det.setScaleFactor(1.3)
    det.setMinNeighbours(5)

    gray = det.gray(im)

    faces = det.detect(gray)
    print(len(faces))
    recognized = []
    for (x, y, w, h) in faces:
        face = gray[y:y + h, x:x + w]
        # Fix: the original drew a 2-px blue rectangle that was immediately
        # and fully covered by this thicker green one at the same coords —
        # a dead draw, removed.
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)

        pred, _, conf = rec.predict(face, im_width, im_height)
        print(pred, conf)
        # Lower LBPH confidence (distance) means a better match; 90 is the
        # accept/reject cut-off.
        if conf < 90:
            recognized.append(names[pred] + " ")
            # NOTE(review): the overlay formats 'pred' (the class index);
            # 'conf' was probably intended — left as-is to preserve output.
            cv2.putText(im, '%s - %.0f' % (names[pred], pred),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1,
                        (0, 255, 0))
        else:
            recognized.append("Unrecognized face" + " ")
            cv2.putText(im, 'not recognized', (x - 10, y - 10),
                        cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255))

    # Assemble the response string exactly as the original concatenation did.
    send = "".join(recognized) + " Detected"
    print(send)
    data2 = {"name": send}

    return jsonify(data2)