Example #1
def create_manual_data():
    
    FRGraph = FaceRecGraph()
    MTCNNGraph = FaceRecGraph()
    aligner = AlignCustom()
    extract_feature = FaceFeature(FRGraph)
    face_detect = MTCNNDetect(MTCNNGraph, scale_factor=2)  # scale_factor rescales the image for faster detection
    vs = cv2.VideoCapture(0)  # get input from webcam
#     print("Please input new user ID:")
    new_name = request.form['input_name']  # the user ID comes from the web form instead of a console input()
    f = open('./facerec_128D.txt', 'r')
    data_set = json.loads(f.read())
    person_imgs = {"Left": [], "Right": [], "Center": []}
    person_features = {"Left": [], "Right": [], "Center": []}
    print("Please start turning slowly. Press 'q' to save and add this new user to the dataset")
    while True:
        _, frame = vs.read()
        rects, landmarks = face_detect.detect_face(frame, 80)  # min face size is set to 80x80
        for (i, rect) in enumerate(rects):
            aligned_frame, pos = aligner.align(160, frame, landmarks[:, i])
            if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                person_imgs[pos].append(aligned_frame)
                cv2.imshow("Captured face", aligned_frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    for pos in person_imgs:  # there are a few edge cases here, but this is kept simple for the demo
        person_features[pos] = [np.mean(extract_feature.get_features(person_imgs[pos]), axis=0).tolist()]
    data_set[new_name] = person_features
    f = open('./facerec_128D.txt', 'w')
    f.write(json.dumps(data_set))

    return render_template('home.html')
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        FRGraph = FaceRecGraph()
        self.aligner = AlignCustom()
        self.extract_feature = FaceFeature(FRGraph)
        self.face_detect = MTCNNDetect(FRGraph, scale_factor=2)  # scale_factor rescales the image for faster detection

        self.person_imgs = {"Left": [], "Right": [], "Center": []}
        self.person_features = {"Left": [], "Right": [], "Center": []}
        self.video = cv2.VideoCapture(2)

        # If you decide to use video.mp4, the file must be in the same folder
        # as main.py.
        # self.video = cv2.VideoCapture('video.mp4')
    
    def __del__(self):
        f = open('./facerec_128D.txt', 'r+')
        data_set = json.loads(f.read())

        for pos in self.person_imgs:  # there are a few edge cases here, but this is kept simple for the demo
            self.person_features[pos] = [np.mean(self.extract_feature.get_features(self.person_imgs[pos]), axis=0).tolist()]
        data_set["your_name"] = self.person_features
        f = open('./facerec_128D.txt', 'w+')
        f.write(json.dumps(data_set))

        print('Saved')

        self.video.release()
    
    def get_frame(self):
        success, image = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        rects, landmarks = self.face_detect.detect_face(image, 80)  # min face size is set to 80x80
        for (i, rect) in enumerate(rects):
            aligned_frame, pos = self.aligner.align(160, image, landmarks[i])
            # print(pos)
            if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                self.person_imgs[pos].append(aligned_frame)
                cv2.rectangle(image,(rect[0],rect[1]),(rect[0] + rect[2],rect[1]+rect[3]),(0,255,0),2) #draw bounding box for the face
                # print("Face captured!")
                # cv2.imshow("Captured face", aligned_frame)
        key = cv2.waitKey(1) & 0xFF

        
        
        # cv2.putText(frame,recog_data[i][0]+" - "+str(recog_data[i][1])+"%",(rect[0],rect[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)

        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
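
The example above never shows the Flask route that consumes the JPEG frames returned by get_frame(). A minimal sketch of the standard Motion-JPEG streaming pattern, assuming a Flask app object and the VideoCamera class above (the route name and app object are assumptions, not part of the original code):

from flask import Flask, Response

app = Flask(__name__)  # hypothetical app; the original listing does not show its Flask setup

def gen(camera):
    # Yield an endless multipart stream of JPEG frames from VideoCamera.get_frame().
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # The browser replaces the displayed image each time a new multipart chunk arrives.
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')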
Example #3
def addNewPerson(capture, newLabel):
    tensorGraph = tf.Graph()
    aligner = AlignCustom()
    allFeatures = FaceFeature(tensorGraph)
    face_detect = MTCNNDetect(tensorGraph, scale_factor=2)
    f = open('./facerec_128D.txt', 'r')
    data_set = json.loads(f.read())
    person_imgs = {
        "Left": [],
        "Right": [],
        "Center": []
    }
    person_features = {
        "Left": [],
        "Right": [],
        "Center": []
    }
    print(
        "Please start turning slowly. Press 'q' to save and add this new user to the dataset"
    )
    while True:
        _, frame = capture.read()
        rects, landmarks = face_detect.detect_face(frame, 80)
        # min face size is set to 80x80
        for (i, rect) in enumerate(rects):
            aligned_frame, pos = aligner.align(160, frame, landmarks[i])
            if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                person_imgs[pos].append(aligned_frame)
                cv2.imshow("Captured face", aligned_frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    cv2.destroyAllWindows()

    print("Capturing new face has ended")
    for pos in person_imgs:
        person_features[pos] = [
            np.mean(allFeatures.get_features(person_imgs[pos]),
                    axis=0).tolist()
        ]
    data_set[newLabel] = person_features
    f = open('./facerec_128D.txt', 'w')
    f.write(json.dumps(data_set))
    print("new face saved")
Example #4
def identifyPeople(capture):
    tensorGraph = tf.Graph()
    aligner = AlignCustom()
    allFeatures = FaceFeature(tensorGraph)
    face_detect = MTCNNDetect(tensorGraph, scale_factor=2)
    print("[INFO] camera sensor warming up...")
    while True:
        _, frame = capture.read()
        rects, landmarks = face_detect.detect_face(frame, 80)
        # min face size is set to 80x80
        aligns = []
        positions = []
        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = aligner.align(160, frame, landmarks[i])
            if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
                aligns.append(aligned_face)
                positions.append(face_pos)
            else:
                print("Align face failed")
        if (len(aligns) > 0):
            features_arr = allFeatures.get_features(aligns)
            recog_data = getKnownPeople(features_arr, positions)
            for (i, rect) in enumerate(rects):
                cv2.rectangle(frame, (rect[0], rect[1]),
                              (rect[0] + rect[2], rect[1] + rect[3]),
                              (255, 0, 0))  # draw bounding box for the face
                cv2.putText(
                    frame,
                    recog_data[i][0] + " - " + str(recog_data[i][1]) + "%",
                    (rect[0], rect[1]), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 1, cv2.LINE_AA)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
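
getKnownPeople() is called above but not defined in this example. A sketch of what it presumably does, modeled on the findPeople() matcher shown later in Examples #12 and #13 and assuming the same facerec_128D.txt layout and the usual json/numpy/sys imports, could look like this:

def getKnownPeople(features_arr, positions, thres=0.6, percent_thres=70):
    # Assumed implementation: nearest-neighbour match of each 128-D feature vector
    # against the stored embeddings for the same head position (Left/Right/Center).
    f = open('./facerec_128D.txt', 'r')
    data_set = json.loads(f.read())
    f.close()
    results = []
    for (i, features_128D) in enumerate(features_arr):
        result = "Unknown"
        smallest = sys.maxsize
        for person in data_set.keys():
            for data in data_set[person][positions[i]]:
                distance = np.sqrt(np.sum(np.square(data - features_128D)))
                if distance < smallest:
                    smallest = distance
                    result = person
        percentage = min(100, 100 * thres / smallest)
        if percentage <= percent_thres:
            result = "Unknown"
        results.append((result, percentage))
    return results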
Example #5
def camera_recog():
    FRGraph = FaceRecGraph()
    MTCNNGraph = FaceRecGraph()
    aligner = AlignCustom()
    extract_feature = FaceFeature(FRGraph)
    face_detect = MTCNNDetect(MTCNNGraph, scale_factor=2)  # scale_factor rescales the image for faster detection
    print("[INFO] camera sensor warming up...")
    vs = cv2.VideoCapture(0)  # get input from webcam
    detect_time = time.time()
    while True:
        _, frame = vs.read()
        # you could add an ROI here, but for the sake of the demo this is kept simple
        rects, landmarks = face_detect.detect_face(frame, 80)  # min face size is set to 80x80
        aligns = []
        positions = []

        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = aligner.align(160,frame,landmarks[:,i])
            if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
                aligns.append(aligned_face)
                positions.append(face_pos)
            else: 
                print("Align face failed") #log        
        if(len(aligns) > 0):
            features_arr = extract_feature.get_features(aligns)
            recog_data = findPeople(features_arr,positions)
            for (i,rect) in enumerate(rects):
                cv2.rectangle(frame,(rect[0],rect[1]),(rect[2],rect[3]),(255,0,0)) #draw bounding box for the face
                cv2.putText(frame,recog_data[i][0]+" - "+str(recog_data[i][1])+"%",(rect[0],rect[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)


        cv2.imshow("Frame",frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
            
    return render_template('home.html')
Example #6
    def findpeopleface(self):

        self.video_flag = True

        parser = argparse.ArgumentParser()
        parser.add_argument("--mode",
                            type=str,
                            help="Run camera recognition",
                            default="camera")
        args = parser.parse_args(sys.argv[1:])
        FRGraph = FaceRecGraph()
        aligner = AlignCustom()
        extract_feature = FaceFeature(FRGraph)
        face_detect = MTCNNDetect(FRGraph, scale_factor=2)
        # scale_factor, rescales image for faster detection
        print("[INFO] camera sensor warming up...")

        vs = cv2.VideoCapture(
            'rtsp://*****:*****@192.168.2.131/cam/realmonitor?channel=1&subtype=0'
        )
        # get input from webcam
        #vs = cv2.VideoCapture('test.mp4')

        c = 0
        _, frame = vs.read()

        message = ''

        result_dict = {}

        while True:
            timeF = frame_interval

            if (c % timeF == 0):
                # you could add an ROI here, but for the sake of the demo this is kept simple
                rects, landmarks = face_detect.detect_face(frame, 20)
                # min face size is set to 20x20
                aligns = []

                positions = []
                for (i, rect) in enumerate(rects):
                    aligned_face, face_pos = aligner.align(
                        182, frame, landmarks[i])
                    aligns.append(aligned_face)
                    positions.append(face_pos)
                features_arr = extract_feature.get_features(aligns)

                recog_data = findPeople(features_arr, positions)

                for (i, rect) in enumerate(rects):
                    cv2.rectangle(frame, (rect[0], rect[1]),
                                  (rect[0] + rect[2], rect[1] + rect[3]),
                                  (0, 255, 0),
                                  2)  # draw bounding box for the face
                    cv2.putText(
                        frame,
                        recog_data[i][0] + " - " + str(recog_data[i][1]) + "%",
                        (rect[0], rect[1]), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255), 1, cv2.CV_AA)

                    if result_dict.has_key(recog_data[i][0]):
                        result_dict[recog_data[i][0]][1] += 1
                        result_dict[recog_data[i][0]][0] = (
                            result_dict[recog_data[i][0]][0] *
                            (result_dict[recog_data[i][0]][1] - 1) +
                            float(recog_data[i][1])
                        ) / result_dict[recog_data[i][0]][1]
                    else:
                        result_dict[recog_data[i][0]] = [
                            float(recog_data[i][1]), 1
                        ]

                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
                img = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0],
                                   QtGui.QImage.Format_RGB888)
                pixmap = QtGui.QPixmap(img)
                self.label.setPixmap(pixmap)

                # result_dict stores each name and its accuracy; sort it by value into result_list,
                # build the message string from result_list, and display it
                result_list = sorted(result_dict.items(),
                                     key=lambda item: item[1][1],
                                     reverse=True)
                message = ''
                for i in result_list:
                    message += i[0]
                    message += ': \n'
                    message += str(i[1][0])[0:10]
                    message += '%\n'
                    message += str(i[1][1])[0:7]
                    message += ' times\n\n'
                self.plainTextEdit.setPlainText(message)

                key = cv2.waitKey(1) & 0xFF
                if self.video_flag == False:
                    break

            _, frame = vs.read()
            c += 1

        vs.release()
        cv2.destroyAllWindows()
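
The result_dict bookkeeping above keeps, for each recognized name, a running average of the confidence and a sighting count. Isolated into a small helper (the function and variable names here are hypothetical, the arithmetic is the same incremental mean), it looks like this:

def update_running_average(result_dict, name, confidence):
    # result_dict maps name -> [average confidence, number of sightings]
    if name in result_dict:
        avg, n = result_dict[name]
        n += 1
        # new average = (old average * (n - 1) + new value) / n
        result_dict[name] = [(avg * (n - 1) + confidence) / n, n]
    else:
        result_dict[name] = [confidence, 1]

# e.g. two sightings of "alice" at 80% and 90% give an average of 85.0%
scores = {}
update_running_average(scores, "alice", 80.0)
update_running_average(scores, "alice", 90.0)
print(scores["alice"])  # [85.0, 2]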
Example #7
    def addface(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("--mode",
                            type=str,
                            help="Run camera recognition",
                            default="camera")
        args = parser.parse_args(sys.argv[1:])
        FRGraph = FaceRecGraph()
        aligner = AlignCustom()
        extract_feature = FaceFeature(FRGraph)
        face_detect = MTCNNDetect(FRGraph, scale_factor=2)
        # scale_factor, rescales image for faster detection

        # this print should be shown in the UI instead of the console
        print("Please input new user ID:")
        #new_name = raw_input("input ID:");  # ez python input()
        new_name = unicode(self.lineEdit.text().toUtf8(), 'utf8', 'ignore')

        print("ce shi dai ma")
        f = open('./facerec_128D.txt', 'r')
        data_set = json.loads(f.read())
        person_imgs = {
            "Left": [],
            "Right": [],
            "Center": []
        }
        person_features = {
            "Left": [],
            "Right": [],
            "Center": []
        }
        print(
            "Please start turning slowly. Press 'q' to save and add this new user to the dataset"
        )

        vs = cv2.VideoCapture(
            "rtsp://*****:*****@192.168.2.131/cam/realmonitor?channel=1&subtype=0"
        )

        while True:
            _, frame = vs.read()
            rects, landmarks = face_detect.detect_face(frame, 20)
            # min face size is set to 20x20
            for (i, rect) in enumerate(rects):
                aligned_frame, pos = aligner.align(182, frame, landmarks[i])
                person_imgs[pos].append(aligned_frame)
                cv2.imshow("Captured face", aligned_frame)

            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
            img = QtGui.QImage(frame.data, frame.shape[1], frame.shape[0],
                               QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap(img)
            self.label.setPixmap(pixmap)

            key = cv2.waitKey(1) & 0xFF
            #if key == ord("q"):
            #   break
            if self.video_flag == False:
                break

        vs.release()
        cv2.destroyAllWindows()

        for pos in person_imgs:  # there are a few edge cases here, but this is kept simple for the demo
            print("ceshi")
            print(person_imgs[pos])
            person_features[pos] = [
                np.mean(extract_feature.get_features(person_imgs[pos]),
                        axis=0).tolist()
            ]
        data_set[new_name] = person_features

        print("done done done done")
        f = open('./facerec_128D.txt', 'w')
        f.write(json.dumps(data_set))
        exit(0)
Example #8
            # Align image and find the position of the face
            aligned_image, pos = aligner.align(DESIRED_SIZE, image, landmark_s)
            
            if len(aligned_image) == DESIRED_SIZE and len(aligned_image[0]) == DESIRED_SIZE:   
                # Load the aligned face to the proper angle
                person_imgs_from_different_angles[pos].append(aligned_image)



            print("[INFO] Number of persons in the image: ", len(rects)/4)

            # Extract the features from images
            for pos in person_imgs_from_different_angles:
                if (pos == "Center"):
                    try:
                        person_features_from_different_angles[pos] = [np.mean(feature_extractor.get_features(person_imgs_from_different_angles[pos]), axis = 0).tolist()]
                    except Exception:
                        pass
                else:
                    person_features_from_different_angles[pos] = [[0 for i in range(128)]]
            
            data_set[img_name] = person_features_from_different_angles

            # Write back to db
            f = open('./faces_db.txt', 'w')
            f.write(json.dumps(data_set))

            print("[INFO] Added image to database... ")

        else:
            pass
Example #9
        result = "Unregistered"
        smallest = sys.maxsize
        for person in data_set.keys():
            person_data = data_set[person]
            distance = np.sqrt(np.sum(np.square(person_data - features_512D)))
            if (distance < smallest):
                smallest = distance
                result = person
        percentage = min(100, 100 * thres / smallest)
        if percentage <= percent_thres:
            result = "Unregistered"
        returnRes.append((result, percentage))
    return returnRes


while True:  # press Ctrl-C to stop image display program
    image_name, image = image_hub.recv_image()
    # auto_result, alpha, beta = automatic_brightness_and_contrast(image)
    # cartoon, color, edges, img = vector_image(auto_result)
    # features_arr = extract_feature.get_features(image)
    # findPeople(features_arr)
    # print(features_arr)

    cv2.imshow(image_name, image)
    features_arr = extract_feature.get_features(image)

    # cv2.waitKey(1)  # wait until a key is pressed
    # image_hub.send_reply(b'OK')
    # key = cv2.waitKey(1) & 0xFF
    # if key == ord("q"):
    #     break
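
The first fragment in this example is only the inner matching loop of a findPeople-style function; its header and the surrounding iteration over the extracted feature vectors are not shown. A self-contained sketch of the full matcher this fragment appears to belong to (the function name, defaults, and data file are assumptions based on the other examples) could look like this:

def findPeople(features_arr, thres=0.6, percent_thres=70):
    # Assumed wrapper around the loop shown above: compare each extracted feature
    # vector against every stored embedding and keep the closest registered person.
    f = open('./facerec_128D.txt', 'r')
    data_set = json.loads(f.read())
    f.close()
    returnRes = []
    for features_512D in features_arr:
        result = "Unregistered"
        smallest = sys.maxsize
        for person in data_set.keys():
            person_data = np.array(data_set[person])
            distance = np.sqrt(np.sum(np.square(person_data - features_512D)))
            if distance < smallest:
                smallest = distance
                result = person
        percentage = min(100, 100 * thres / smallest)
        if percentage <= percent_thres:
            result = "Unregistered"
        returnRes.append((result, percentage))
    return returnRes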
Example #10
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        FRGraph = FaceRecGraph()
        self.aligner = AlignCustom()
        self.extract_feature = FaceFeature(FRGraph)
        self.face_detect = MTCNNDetect(FRGraph, scale_factor=2) #scale_factor, rescales image for faster detection

        self.person_imgs = {"Left" : [], "Right": [], "Center": []}
        self.person_features = {"Left" : [], "Right": [], "Center": []}
        self.video = cv2.VideoCapture(2)

        self.names = json.load(open("names.txt"))
        # If you decide to use video.mp4, the file must be in the same folder
        # as main.py.
        # self.video = cv2.VideoCapture('video.mp4')
    
    def __del__(self):
        self.video.release()
    
    def get_frame(self):

        _,frame = self.video.read()

        if frame is None:
            print("Frame not available (None)")
            exit()

        # you could add an ROI here, but for the sake of the demo this is kept simple
        rects, landmarks = self.face_detect.detect_face(frame, 80)#min face size is set to 80x80
        aligns = []
        positions = []



        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = self.aligner.align(160,frame,landmarks[i])
            if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
                aligns.append(aligned_face)
                positions.append(face_pos)
            else: 
                print("Align face failed") #log        
                
        if(len(aligns) > 0):
            features_arr = self.extract_feature.get_features(aligns)
            recog_data = findPeople(features_arr,positions)
            for (i,rect) in enumerate(rects):
                cv2.rectangle(frame,(rect[0],rect[1]),(rect[0] + rect[2],rect[1]+rect[3]),(0,255,0),2) #draw bounding box for the face
                cv2.putText(frame,recog_data[i][0]+" - "+str(recog_data[i][1])+"%",(rect[0],rect[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)
                for rollno, name in self.names.items():   
                    if name == recog_data[i][0]:
                        for i in range(1):
                            print(str(recog_data))
                            # vs.release()
                            # cv2.destroyAllWindows()
                            # os.system('python auth1.py')

                            # vs.release()
                            # cv2.destroyAllWindows()
                            # vs.release() # camera release 
                            # cv2.destroyAllWindows() 
                    else:
                        pass 
            
        # cv2.imshow("Capturing Face",frame)
        # key = cv2.waitKey(1) & 0xFF
        # if key == 27 or key == ord("q"):
        #     break


        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
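
names.txt, loaded in __init__ above and iterated as rollno/name pairs, is assumed here to be a JSON object mapping roll numbers to the names registered in facerec_128D.txt; the contents below are purely illustrative:

# Hypothetical contents of names.txt: {"101": "Alice", "102": "Bob"}
names = json.load(open("names.txt"))
for rollno, name in names.items():
    print(rollno, name)  # e.g. 101 Alice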
Example #11
rootdir = "/home/erwin/Desktop/photo_register/23434"
# for files in os.listdir(rootdir):
#     new_name = rootdir+'/'+files
#     print(new_name)
# for file in files:
#     if file.endswith(".jpg"):
#         # try:
#         vs = os.path.join(subdir, file)
#         # print(vs)
frame = cv2.imread('/home/erwin/Desktop/1_670074_DST0163_08:59:27.176995.jpg')
# rects, landmarks = face_detect.detect_face(frame,20)
# for (i, rect) in enumerate(rects):
#     cv2.rectangle(frame,(rect[0],rect[1]),(rect[2],rect[3]),(0,255,0),2)
#     aligned_face, face_pos = aligner.align(160,frame,landmarks[:,i])
# # aligned_face, face_pos = aligner.align(160,frame,landmarks[:,i])
# # frame = plt.imread(vs, format=None)
# # frame = vs.read()
# # frame = frame[:, :, (2, 0, 1)]
# # print(frame)
# # print(type(frame))
# # cv2.imshow(frame)
# global features_arr
features_arr = extract_feature.get_features(frame)
recog_data = findPeople(features_arr)
# print(recog_data)
#     if new_name==recog_data[0]:
#         benar = benar + 1
#     else:
#         salah = salah + 1
# except:
#     continue
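
Most of this example is commented-out exploration, and the two active lines pass the raw frame straight to get_features(). A sketch of the detect → align → extract → match pipeline the other examples use for a single image, assuming the same face_detect, aligner, extract_feature, and findPeople objects are already constructed, would be:

frame = cv2.imread('/home/erwin/Desktop/1_670074_DST0163_08:59:27.176995.jpg')
rects, landmarks = face_detect.detect_face(frame, 20)

aligns, positions = [], []
for (i, rect) in enumerate(rects):
    aligned_face, face_pos = aligner.align(160, frame, landmarks[:, i])
    if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
        aligns.append(aligned_face)
        positions.append(face_pos)

if aligns:
    features_arr = extract_feature.get_features(aligns)  # expects a list of aligned 160x160 crops
    recog_data = findPeople(features_arr, positions)     # (name, confidence) per detected face
    print(recog_data)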
Example #12
class Second(QDialog):
    def __init__(self, parent=None):
        super(Second, self).__init__(parent)
        loadUi('addImage.ui', self)

        self.face_detector = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
        self.FRGraph = FaceRecGraph()
        self.MTCNNGraph = FaceRecGraph()
        self.aligner = AlignCustom()
        self.extract_feature = FaceFeature(self.FRGraph)
        self.face_detect = MTCNNDetect(self.MTCNNGraph, scale_factor=2)
        self.person_imgs = {
            "Left": [],
            "Right": [],
            "Center": []
        }
        self.person_features = {
            "Left": [],
            "Right": [],
            "Center": []
        }
        self.init_ui()
        self.count = 0

    def init_ui(self):
        self.title = "Yüz Ekleme"
        self.top = 200
        self.left = 650
        self.width = 640
        self.height = 640
        #        self.imageData=QLabel(self)
        #        pixmap=QPixmap("face.png")
        #        self.imageData.setPixmap(pixmap)
        #        self.imageData.setAlignment(QtCore.Qt.AlignCenter)
        #        self.vbox.addWidget(self.labelImage)
        imageData = cv2.imread("face.png", 2)
        qformat = QImage.Format_Indexed8

        #        qformat=QImage.Format_RGB888
        outImage = QImage(imageData, imageData.shape[1], imageData.shape[0],
                          imageData.strides[0], qformat)
        outImage = outImage.rgbSwapped()
        self.labelImage.setPixmap(QPixmap.fromImage(outImage))
        self.labelImage.setScaledContents(True)

        #        self.face_id=self.personelId(self.path)
        self.setFixedSize(self.width, self.height)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.setWindowTitle(self.title)
        self.image = None
        #        self.imageData.hide()

        self.addImage.clicked.connect(self.clickMethod)

    def clickMethod(self):
        self.capture = cv2.VideoCapture(camType)
        #        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
        #        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,640)

        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        #        detect_image=self.detect_face(self.image)
        self.displayImage(self.image, 1)

        detect_name = self.detect_person(self.image)

        if detect_name == "Unknown":

            self.timer = QTimer(self)
            self.timer.timeout.connect(self.update_frame)
            self.timer.start(5)
        else:
            buttonReply = QMessageBox.question(self, 'Uyarı', "Yüz Kayıtlı!",
                                               QMessageBox.Cancel)
            if buttonReply == QMessageBox.Cancel:
                self.close()
                self.destroy()

    def unique(self, list1):
        unique_list = []
        for x in list1:
            if x not in unique_list:
                unique_list.append(x)

        return unique_list

    def personelId(self, path):

        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        ids = []
        for imagePath in imagePaths:
            ids.append(os.path.split(imagePath)[-1].split(".")[1])
#            print(os.path.split(imagePath)[-1])
        unique_id = self.unique(ids)
        if len(unique_id) == 0:
            return 0
        return int(unique_id[-1]) + 1

    def adjust_gamma(self, face_id, name, count, resim, gamma=2.5):
        # build a lookup table mapping the pixel values [0, 255] to
        # their adjusted gamma values
        self.count = count
        self.count += 1

        img = cv2.imread(
            "dataset/User." + str(face_id) + "." + str(name) + '.' +
            str(resim) + ".jpg", 0)
        invGamma = 1.0 / gamma
        table = np.array([((i / 255.0)**invGamma) * 255
                          for i in np.arange(0, 256)]).astype("uint8")
        # apply gamma correction using the lookup table
        cv2.imwrite(
            "dataset/User." + str(face_id) + "." + str(name) + '.' +
            str(self.count) + ".jpg", cv2.LUT(img, table))
        return self.count

    def newImage(self, face_id, name, count, resim):
        self.count = count
        img = cv2.imread(
            "dataset/User." + str(face_id) + "." + str(name) + '.' +
            str(resim) + ".jpg", 0)
        clahe = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(8, 8))
        cl1 = clahe.apply(img)
        self.count += 1
        cv2.imwrite(
            "dataset/User." + str(face_id) + "." + str(name) + '.' +
            str(self.count) + ".jpg", cl1)
        #    bright=cv2.addWeighted(img,2,np.zeros(img.shape,img.dtype),0,50)
        #    count+=1
        #    cv2.imwrite("dataset/User." + str(face_id) +"." +str(name) +'.' + str(count) + ".jpg", bright)
        equ = cv2.equalizeHist(img)
        self.count += 1
        cv2.imwrite(
            "dataset/User." + str(face_id) + "." + str(name) + '.' +
            str(self.count) + ".jpg", equ)
        self.count = self.adjust_gamma(face_id, name, self.count, resim)
        print(count)
        return self.count

    def closeEvent(self, event):
        name = self.lineEdit.text()
        f = open('./facerec_128D.txt', 'r')
        data_set = json.loads(f.read())
        for pos in self.person_imgs:  # there are a few edge cases here, but this is kept simple for the demo
            self.person_features[pos] = [
                np.mean(self.extract_feature.get_features(
                    self.person_imgs[pos]),
                        axis=0).tolist()
            ]


#            print(person_features)
        data_set[name] = self.person_features
        f = open('./facerec_128D.txt', 'w')
        f.write(json.dumps(data_set))
        self.close()
        self.window = Window()
        #        self.window.doTraining()
        self.window.show()

    def detect_face(self, img):

        name = self.lineEdit.text()
        f = open('./facerec_128D.txt', 'r')
        data_set = json.loads(f.read())
        #        person_imgs = {"Left" : [], "Right": [], "Center": []};
        #        person_features = {"Left" : [], "Right": [], "Center": []};
        rects, landmarks = self.face_detect.detect_face(img, 80)
        # min face size is set to 80x80
        for (i, rect) in enumerate(rects):
            self.count += 1
            aligned_frame, pos = self.aligner.align(160, img, landmarks[:, i])
            print(aligned_frame)
            if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:
                self.person_imgs[pos].append(aligned_frame)
                self.displayImage(aligned_frame, 1)

                if (self.count >= 80):
                    self.count = 0
                    self.timer.stop()
                    self.close()

        return img

    def detect_person(self, img):
        person_name = ""
        #        self.timer.stop()
        rects, landmarks = self.face_detect.detect_face(img, 80)
        #min face size is set to 80x80
        aligns = []
        positions = []

        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = self.aligner.align(
                160, img, landmarks[:, i])
            if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
                aligns.append(aligned_face)
                positions.append(face_pos)
            else:
                print("Align face failed")  #log
        if (len(aligns) > 0):
            features_arr = self.extract_feature.get_features(aligns)
            person_name = self.findPeople(features_arr, positions)
        return person_name

    def findPeople(self, features_arr, positions, thres=0.6, percent_thres=70):
        f = open('./facerec_128D.txt', 'r')
        data_set = json.loads(f.read())
        returnRes = ""
        for (i, features_128D) in enumerate(features_arr):
            result = "Unknown"
            smallest = sys.maxsize
            for person in data_set.keys():
                person_data = data_set[person][positions[i]]
                for data in person_data:
                    distance = np.sqrt(np.sum(np.square(data - features_128D)))
                    if (distance < smallest):
                        smallest = distance
                        result = person
            percentage = min(100, 100 * thres / smallest)
            if percentage <= percent_thres:
                result = "Unknown"
            returnRes = result
        return returnRes

    def update_frame(self):
        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        detect_image = self.detect_face(self.image)
        self.displayImage(detect_image, 1)

    def displayImage(self, img, window=1):
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0],
                          qformat)
        outImage = outImage.rgbSwapped()

        if window == 1:
            self.labelImage.setPixmap(QPixmap.fromImage(outImage))
            self.labelImage.setScaledContents(True)
        if window == 2:
            self.processedLabel.setPixmap(QPixmap.fromImage(outImage))
            self.processedLabel.setScaledContents(True)
Example #13
class Window(QtWidgets.QWidget):
    def __init__(self):
        super(Window, self).__init__()
        self.detector = cv2.CascadeClassifier(
            "haarcascade_frontalface_default.xml")
        self.FRGraph = FaceRecGraph()
        self.MTCNNGraph = FaceRecGraph()
        self.aligner = AlignCustom()
        self.extract_feature = FaceFeature(self.FRGraph)
        self.face_detect = MTCNNDetect(self.MTCNNGraph, scale_factor=2)
        #scale_factor, rescales image for faster detection
        #        self.face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        self.InitWindow()

    def InitWindow(self):

        self.title = "Yüz Tanıma"
        self.top = 200
        self.left = 650
        self.width = 640
        self.height = 640
        self.setFixedSize(self.width, self.height)
        self.image = None

        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        self.timer = QTimer(self)
        self.run_button = QtWidgets.QPushButton('Yüzü Bul')
        self.addImage = QtWidgets.QPushButton('Veri Ekle')
        self.doTrain = QtWidgets.QPushButton('Train Et')

        self.run_button.clicked.connect(self.findImage)
        self.addImage.clicked.connect(self.imageAdd)

        self.doTrain.clicked.connect(self.doTraining)
        self.vbox = QVBoxLayout()

        print(camType)
        #        first=FirstScreen()
        #        print(first.camType)
        #        first=FirstScreen()
        #        print(first.buttonClick.camType)

        self.imageBox = QLabel(self)
        self.imageBox.resize(460, 330)

        self.vbox.addWidget(self.imageBox)
        self.vbox.addWidget(self.run_button)
        self.vbox.addWidget(self.addImage)
        self.vbox.addWidget(self.doTrain)

        self.setLayout(self.vbox)
        self.timer.stop()

    def unique(self, list1):
        unique_list = []
        for x in list1:
            if x not in unique_list:
                unique_list.append(x)
        return unique_list

    def closeEvent(self, event):
        #        reply = QMessageBox.question(self, 'Quit', 'Are You Sure to Quit?', QMessageBox.No | QMessageBox.Yes)
        #        if reply == QMessageBox.Yes:
        #            event.accept()
        #            self.close()
        #        else:
        #            event.ignore()

        self.close()
        self.firstScreen = FirstScreen()
        self.firstScreen.show()

    def faceNames(self, path):
        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        name = []
        ids = []
        for imagePath in (imagePaths):
            name.append(os.path.split(imagePath)[-1].split(".")[2])
            ids.append(os.path.split(imagePath)[-1].split(".")[1])
        unique_ids = self.unique(ids)
        return name, unique_ids

    def getName(self, searchId, path):
        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        for imagePath in (imagePaths):
            if (searchId == os.path.split(imagePath)[-1].split(".")[1]):
                return os.path.split(imagePath)[-1].split(".")[2]

    def update_frame(self):

        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        detect_image = self.detect_face(self.image)
        self.displayImage(detect_image, 1)

    def displayImage(self, img, window=1):
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0],
                          qformat)
        outImage = outImage.rgbSwapped()
        if window == 1:

            self.imageBox.setPixmap(QPixmap.fromImage(outImage))
            self.imageBox.setScaledContents(True)
        if window == 2:
            self.processedLabel.setPixmap(QPixmap.fromImage(outImage))
            self.processedLabel.setScaledContents(True)

    def getImagesAndLabels(self, path):

        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        faceSamples = []
        ids = []
        names = []

        for imagePath in imagePaths:

            PIL_img = Image.open(imagePath).convert(
                'L')  # convert it to grayscale
            img_numpy = np.array(PIL_img, 'uint8')

            id = int(os.path.split(imagePath)[-1].split(".")[1])
            names = (os.path.split(imagePath)[-1].split(".")[2])
            faces = self.detector.detectMultiScale(img_numpy)

            for (x, y, w, h) in faces:
                faceSamples.append(img_numpy[y:y + h, x:x + w])
                ids.append(id)

        return faceSamples, ids, names

    def findImage(self):
        #        self.timer.stop()

        self.capture = cv2.VideoCapture(camType)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        #        self.timer=QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def imageAdd(self):
        #        self.run_button.clicked.disconnect(self.record_video.start_recording)
        self.timer.stop()
        #        self.close()
        self.setVisible(False)
        self.firstScreen = FirstScreen()
        #        self.firstScreen.close()
        self.firstScreen.setVisible(False)
        self.SW = Second()
        self.SW.show()

    def detect_face(self, img):
        #        self.timer.stop()
        rects, landmarks = self.face_detect.detect_face(img, 80)
        #min face size is set to 80x80
        aligns = []
        positions = []

        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = self.aligner.align(
                160, img, landmarks[:, i])
            if len(aligned_face) == 160 and len(aligned_face[0]) == 160:
                aligns.append(aligned_face)
                positions.append(face_pos)
            else:
                print("Align face failed")  #log
        if (len(aligns) > 0):
            features_arr = self.extract_feature.get_features(aligns)
            recog_data = self.findPeople(features_arr, positions)
            for (i, rect) in enumerate(rects):
                cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]),
                              (255, 0, 0))  #draw bounding box for the face
                cv2.putText(
                    img,
                    recog_data[i][0] + " - " + str(recog_data[i][1]) + "%",
                    (rect[0], rect[1]), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (255, 255, 255), 1, cv2.LINE_AA)
        return img

    def findPeople(self, features_arr, positions, thres=0.6, percent_thres=70):
        f = open('./facerec_128D.txt', 'r')
        data_set = json.loads(f.read())
        returnRes = []
        for (i, features_128D) in enumerate(features_arr):
            result = "Unknown"
            smallest = sys.maxsize
            for person in data_set.keys():
                person_data = data_set[person][positions[i]]
                for data in person_data:
                    distance = np.sqrt(np.sum(np.square(data - features_128D)))
                    if (distance < smallest):
                        smallest = distance
                        result = person
            percentage = min(100, 100 * thres / smallest)
            if percentage <= percent_thres:
                result = "Unknown"
            returnRes.append((result, percentage))
        return returnRes

    def doTraining(self):
        self.timer.stop()
        path = 'dataset\\'

        recognizer = cv2.face.LBPHFaceRecognizer_create()
        print(
            "\n [BILGI] Yüzler eğitiliyor. Biraz Zaman Alabilir. Lütfen bekleyiniz..."
        )
        faces, ids, names = self.getImagesAndLabels(path)
        recognizer.train(faces, np.array(ids))

        # Save the model into trainer/trainer.yml
        recognizer.write('trainer/trainer.yml')  # recognizer.save() worked on Mac, but not on Pi

        # Print the number of faces trained and end the program
        print("\n [BILGI] {0} yüz eğitildi..".format(len(np.unique(ids))))