Example No. 1
def train():
    # Lazy import, renamed so it does not shadow this view function (the
    # original imported it as `train`); `render_template` is assumed to be
    # imported from flask elsewhere in the module.
    from face_train import train as train_classifier

    # Fit a k-NN face classifier on the example images and save it to disk.
    classifier = train_classifier("knn_examples/train",
                                  model_save_path="trained_knn_model.clf",
                                  n_neighbors=11)

    return render_template('Train.html')
Example No. 2
    def EditInfo(self):
        # Read the form fields; for a known ID offer to re-capture the face,
        # for a new ID store the record, capture face samples, and retrain.
        global luru
        global mkpath
        global number
        name = self.ui.lineEdit.text()
        number = self.ui.lineEdit_2.text()
        cla = self.ui.lineEdit_3.text()
        sex = self.ui.lineEdit_4.text()
        age = self.ui.lineEdit_5.text()
        if number:
            mkpath = "./data/" + number
            mkdir(mkpath)
            if find_data(number):
                # tim = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                # Embed the camera feed in the GUI
                # self.openCamera()
                # luru = 1
                # Reopen the camera window
                reply = QMessageBox.question(self.ui, 'Record already exists',
                                             'Re-capture this face?',
                                             QMessageBox.Yes | QMessageBox.No,
                                             QMessageBox.No)
                if reply == QMessageBox.Yes:
                    face_save.generate(mkpath, number)
                    face_train.train(mkpath, number)
                else:
                    QMessageBox.question(self.ui, 'Notice',
                                         'You can check in directly',
                                         QMessageBox.Yes)

            else:
                if name and number and cla and sex and age:
                    # tim = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                    add_data(name, number, cla, sex, age)
                    # Embed the camera feed in the GUI
                    # self.openCamera()
                    # luru = 1
                    # Reopen the camera window
                    face_save.generate(mkpath, number)
                    face_train.train(mkpath, number)
                    self.ui.label_8.setText(str(panduan.peple()))
                else:
                    QMessageBox.question(self.ui, 'Notice',
                                         'Please enter the information first',
                                         QMessageBox.Yes)
        else:
            QMessageBox.question(self.ui, 'Notice', 'No information entered',
                                 QMessageBox.Yes)
Example No. 3
import os

import cv2

import face_train

# Assumed module-level capture device; `vid` is used but never defined in the
# original snippet.
vid = cv2.VideoCapture(0)


def start(name):
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    os.makedirs("images/" + name, exist_ok=True)  # ensure the output folder exists
    a = 0
    while True:
        s, img = vid.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            cv2.imwrite("images/" + name + "/" + str(a) + ".png", roi_gray)
            a = a + 1
        cv2.imshow('frame', img)
        if cv2.waitKey(100) == ord('a') or a > 200:  # stop on 'a' or after ~200 crops
            break
    face_train.train()
    vid.release()
    cv2.destroyAllWindows()
Example No. 4
    def clicked1(self):
        # The original called face_train.train() twice and discarded the first
        # result; one call is enough.
        get1 = face_train.train()
        self.label_3.setText("Status: " + get1)
        self.update()
Example No. 5
		singular_image.append(list())
		# binarize the 60-character row: non-space -> 1, space -> 0
		for j in range(60):
			if cur_line[j] != ' ':
				singular_image[i].append(1)
			else:
				singular_image[i].append(0)
	probability_per_class = list()

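	# score the image under each of the two candidate digit classes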
	for i in range(2):
		probability_per_class.append(get_prob(i, singular_image, digit_matrices, 1))
	
	max_value = max(probability_per_class)
	max_index = probability_per_class.index(max_value) 

	digit_class = int(line)
	global num_per_class
	global correct_count
	global correct_per_class
	global confusion_matrix
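	# confusion_matrix[true class][predicted class]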
	confusion_matrix[digit_class][max_index] += 1
	num_per_class[digit_class] +=1
	if max_index == digit_class:
		correct_count+=1
		correct_per_class[max_index]+=1
	# print (max_index, digit_class)



if __name__ == '__main__':
	test(rtrain.train())
Example No. 6
    def show_camera(self):
        # global qflag  (save the captured images)

        # global Id
        # mkpath = "./data/test"
        global sampleNum
        global luru
        global mkpath
        global number
        global facecheck
        # Id = self.ui.lineEdit_2.text()
        # if Id:
        #     luru = 1
        #     mkpath = "./data/" + Id
        #     mkdir(mkpath)

        # self.clear_text()
        detector = cv2.CascadeClassifier(
            'haarcascade/haarcascade_frontalface_default.xml')

        flag, self.imagefan = self.ui.cap.read()
        self.image = cv2.flip(self.imagefan, 1)
        # print(luru)
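        # luru flag (pinyin for "enroll"): while set, capture face samples for the new user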
        if luru:
            faces = detector.detectMultiScale(self.image, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(self.image, (x, y), (x + w, y + h), (255, 0, 0),
                              2)
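                # h / w preserves the face's aspect ratio when resizing to width 200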
                gray_than = (h) / (w)
                f = cv2.resize(self.image[y:y + h, x:x + w],
                               (200, int(200 * gray_than)))
                # incrementing sample number
                sampleNum = sampleNum + 1
                # print(mkpath + "/User." + str(number) +'.' + str(sampleNum) + ".jpg")
                # saving the captured face in the dataset folder
                cv2.imwrite(mkpath + "/User." + str(number) + '.' +
                            str(sampleNum) + ".jpg", f)  #
                cv2.putText(self.image, str((x, y)), (x, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
                cv2.putText(self.image, str(sampleNum), (x, y - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)

                show = cv2.resize(self.image, (800, 600))
                show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
                showImage = QImage(show.data, show.shape[1], show.shape[0],
                                   QImage.Format_RGB888)
                self.ui.label_6.setPixmap(QPixmap.fromImage(showImage))
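                # stop after 30 saved samples, then retrain on the new data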
                if sampleNum >= 30:
                    self.closeCamera()
                    face_train.train(mkpath, number)
                    luru = 0
            # cv2.imshow('frame', self.image)
            # wait 1 millisecond
            # self.image = self.imagefan

        # elif facecheck == 1:
        #     recognizer = cv2.face.LBPHFaceRecognizer_create()
        #     # recognizer = cv2.createLBPHFaceRecognizer() # in OpenCV 2
        #     recognizer.read('trainner/trainner.xml')
        #     # recognizer.load('trainner/trainner.yml') # in OpenCV 2

        #     cascade_path = "haarcascade/haarcascade_frontalface_default.xml"
        #     face_cascade = cv2.CascadeClassifier(cascade_path)
        #     font = cv2.FONT_HERSHEY_SIMPLEX
        #     j = 1
        #     t = 3
        #     time_go=time.time()
        #     if j==1:
        #         time_start = time_go
        #         j = j - 1
        #     time_tal = int(time_go-time_start)
        #     gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        #     faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        #     for (x, y, w, h) in faces:
        #         cv2.rectangle(self.image, (x - 50, y - 50), (x + w + 50, y + h + 50), (225, 0, 0), 2)
        #         my_id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        #         print(my_id, conf)
        #         if conf > 0 and conf <50:
        #             # facedata = sqlTest.find_data(str(my_id)[1])
        #             img_id = "master" + str(my_id)
        #             t=1
        #         else:
        #             img_id = "Unknown"
        #             t=0
        #         # cv2.cv.PutText(cv2.cv.fromarray(im), str(Id), (x, y + h), font, 255)
        #         # print ("Label: %s, Confidence: %.2f" % (img_id, 100-conf))
        #         cv2.putText(self.image, str(img_id), (x, y - 20), font, 1, 255, 2)
        #         show = cv2.resize(self.image,(800,600))
        #         show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        #         showImage = QImage(show.data, show.shape[1],show.shape[0],QImage.Format_RGB888)
        #         self.ui.label_6.setPixmap(QPixmap.fromImage(showImage))
        #         if time_tal >= 300:
        #             t = 0
        #             self.closeCamera()
        #             facecheck = 0
        #         if t == 1:
        #             self.closeCamera()
        #             # dat1 = [[str(facedata[0]),str(facedata[1]),str(facedata[2]),str(facedata[3]),str(facedata[4]),time.strftime("%H:%M:%S",time.localtime())]]
        #             # common.append_csv("record.csv",dat1)
        #             print('hello')

        else:

            show = cv2.resize(self.image, (800, 600))
            show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            showImage = QImage(show.data, show.shape[1], show.shape[0],
                               QImage.Format_RGB888)
            self.ui.label_6.setPixmap(QPixmap.fromImage(showImage))
Example No. 8
def menu():
    # Assumed module-level imports for this snippet: os, socket, struct,
    # pyaudio, numpy as np, plus the project's socket_client and face_train
    # modules.
    ip = [
        ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
        if not ip.startswith("127.")
    ][0]
    ip_ask = input("This program needs to use this computer's IP (y/n): ")
    if ip_ask == "y":
        print("Got the IP: " + ip)
        with open("setting.txt", "w") as fic:
            fic.write("ip:" + str(ip) + "/")
    if ip_ask == "n":
        print("Sorry, but this version needs the IP; there is no workaround.")
    microphone_ask = input(
        "Do you use a microphone compatible with the pyaudio library? "
        "|yes|no|don't know| ")
    # The original tested `... is not -1`, an identity check rather than an
    # equality check; use != instead.
    if microphone_ask.find("know") != -1:
        CHUNK = 1024  # assumed buffer size; CHUNK is not defined in the original snippet
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 44100
        p = pyaudio.PyAudio()
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        output=True,
                        frames_per_buffer=CHUNK)
        data = stream.read(CHUNK)
        # Each 16-bit sample is two bytes, hence 2 * CHUNK unsigned bytes.
        data_int = np.array(struct.unpack(str(2 * CHUNK) + 'B', data))
        print(data_int)
    password = input("What password do you want to set up? ")
    with open("setting.txt", "a") as fic:
        fic.write("pas==" + password + "/")

    with open("setting.txt", "r") as fic:
        set_ = fic.read()
    data2 = "setting set" + str(set_)
    socket_client.socket_client(data2, ip)
    opencv = input("Do you have a camera? ")
    if opencv == "yes":
        import cv2
        with open("setting.txt", "a") as fic:
            fic.write("cam=yes")
        print("OK, we will take some pictures.")
        print("Admin, please stand in front of the camera alone.")
        # The original unrolled the capture sequence by hand (ima10 .. ima15,
        # once per user) and, because roi_gray was never recomputed after each
        # fresh frame, it saved the same face crop under every filename. A
        # small helper that re-detects on every frame keeps the intent while
        # fixing that.
        def capture_faces(user_dir):
            face_cascade = cv2.CascadeClassifier(
                'cascade/data/haarcascade_frontalface_alt.xml')
            os.makedirs("image/" + user_dir, exist_ok=True)
            cap = cv2.VideoCapture(0)
            saved = 0
            while saved < 6:  # six crops per user, as in the original
                ret, frame = cap.read()
                if not ret:
                    continue
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray,
                                                      scaleFactor=1.5,
                                                      minNeighbors=5)
                for (x, y, w, h) in faces:
                    roi_gray = gray[y:y + h, x:x + w]
                    cv2.imwrite(
                        "image/" + user_dir + "/ima" + str(10 + saved) +
                        ".png", roi_gray)
                    saved += 1
                    break
            cap.release()

        capture_faces("user1")
        input("Please put another face in front of the camera")
        capture_faces("user2")
        face_train.train()
        print("We recommend that you restart the program.")
Example No. 9
import numpy as np
import cv2
import pickle
import time
from tkinter import *
from tkinter import messagebox as tkMessageBox  # Python 3 location of the old tkMessageBox module
import os
from face_train import train

# running the face train algorithm
train()

face_cascade = cv2.CascadeClassifier(
    'cascades/data/haarcascades/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier(
    'cascades/data/haarcascades/haarcascade_eye.xml')
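# note: the LBPH recognizer below requires the opencv-contrib-python build (cv2.face)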
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")

labels = {"person_name": 1}  # placeholder; replaced by the saved labels below
with open("labels.pickle", 'rb') as f:
    # load the saved label ids and invert the mapping to id -> name
    og_labels = pickle.load(f)
    labels = {v: k for k, v in og_labels.items()}

cap = cv2.VideoCapture(0)

match = 0
mismatch = 0

timeout = time.time() + 10
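# (the snippet ends here; presumably a recognition loop follows, tallying
# match / mismatch until the 10-second timeout)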