Example #1
def capture(self):
    import os

    import cv2
    # QImage / QPixmap are assumed to come from the project's Qt binding.
    from PyQt5.QtGui import QImage, QPixmap

    from Name import name
    user = name()
    # Load the registered id/name pairs written by the input()/create() helpers.
    with open('write_data.txt') as f:
        for line1 in f:
            info = line1.split()
            user.id.append(info[0])
            user.name.append(info[1])
    print(user.id)
    print(user.name)
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    rec = cv2.createLBPHFaceRecognizer()  # legacy OpenCV 2.4 / 3.0 contrib API
    rec.load('recognizer/trainningData.yml')
    ids = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Grab frames from the camera opened elsewhere as self.cap.
    while (True):
        ret, img = self.cap.read()
        if not ret: continue
        if len(img.shape) == 3 or len(img.shape) == 4:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        faces = faceDetect.detectMultiScale(gray, 2, 5)
        x = 0
        y = 0
        w = 0
        h = 0
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            ids, conf = rec.predict(gray[y:y + h, x:x + w])
            idss = ids
            #print(ids, ":", conf)
            # For LBPH, a lower confidence value means a closer match.
            if (conf < 70):
                ids = user.name[ids - 1]
            else:
                ids = "stranger"
            print(ids, ":", conf)
            names = ids
            path_img = os.path.join('dataSet/' + str(idss), names)
            print(path_img)
            Exist2 = os.path.exists(path_img)
            if Exist2:
                img2 = cv2.imread(path_img + ids + '.jpg')
                #cv2.cvtColor(img2, cv2.COLOR_BGR2RGB, img)
                cv2.imshow('face', img2)
            #self.image2 = QImage()
            #self.label_img.setPixmap(QPixmap.fromImage(self.image2).scaled(self.label_img.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        #self.ncImage = QImage('../图/2.jpg')
        #self.label_img.setPixmap(QPixmap.fromImage(self.ncImage).scaled(self.label_img.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        cv2.putText(img, str(ids), (x, y + h), font, 1, (0, 255, 255))
        # Convert the BGR frame to RGB and show it in the Qt label.
        height, width, bytesPerComponent = img.shape
        bytesPerLine = bytesPerComponent * width
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
        self.image = QImage(img.data, width, height, bytesPerLine, QImage.Format_RGB888)
        self.label_camera.setPixmap(QPixmap.fromImage(self.image).scaled(self.label_camera.width(), self.label_camera.height()))
        #cv2.imshow("Face", img)
        if (cv2.waitKey(1) == ord('q')):
            break
    cv2.destroyAllWindows()
def input():
    import builtins

    from Name import name
    user = name()
    filename = 'write_data.txt'

    with open(filename, 'a') as f:
        # builtins.input is used so the prompts do not resolve back to this
        # function, which shadows the built-in input().
        new_id = builtins.input("Please input an id: ")
        new_name = builtins.input("Please input a name: ")
        add(new_id, new_name)  # project helper; see the sketch after this function
        f.write(str(user.id[user.index - 1]) + " " +
                user.name[user.index - 1])
        f.write("\n")
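The `Name` module and the `add()` helper these snippets rely on are not included in the listing. A minimal sketch of what the `name` class might look like, inferred only from how `user.id`, `user.name`, and `user.index` are used above (this is an assumption, not the project's actual code); `add(id, name)` presumably appends a record to such an object and bumps `index`, but its definition is not shown here.

# Name.py -- hypothetical sketch only; the real project module may differ.
class name:
    def __init__(self):
        self.id = []      # registered user ids, stored as strings
        self.name = []    # user names, parallel to self.id
        self.index = 0    # number of registered users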
Example #3
    def capture(self):
        import cv2
        import numpy as np
        # QImage / QPixmap are assumed to come from the project's Qt binding.
        from PyQt5.QtGui import QImage, QPixmap

        from Name import name
        user = name()
        with open('write_data.txt') as f:
            for line1 in f:
                info = line1.split()
                user.id.append(info[0])
                user.name.append(info[1])

        faceDetect = cv2.CascadeClassifier(
            'haarcascade_frontalface_default.xml')
        #cam = cv2.VideoCapture(0);
        rec = cv2.createLBPHFaceRecognizer()
        rec.load('recognizer/trainningData.yml')
        id = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        while (True):
            #if (self.cap.isOpened()):
            ret, img = self.cap.read()
            if not ret: continue
            if len(img.shape) == 3 or len(img.shape) == 4:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            else:
                gray = img
            # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = faceDetect.detectMultiScale(gray, 1.3, 5)
            x = 0
            y = 0
            h = 0
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                id, conf = rec.predict(gray[y:y + h, x:x + w])
                if (conf < 70):
                    id = user.name[id - 1]
                else:
                    id = "stranger"
            cv2.putText(img, str(id), (x, y + h), font, 1, (0, 255, 255))
            height, width, bytesPerComponent = img.shape
            bytesPerLine = bytesPerComponent * width
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
            self.image = QImage(img.data, width, height, bytesPerLine,
                                QImage.Format_RGB888)
            self.label_camera.setPixmap(
                QPixmap.fromImage(self.image).scaled(
                    self.label_camera.width(), self.label_camera.height()))
            cv2.imshow("Face", img)
            if (cv2.waitKey(1) == ord('q')):
                break
        cv2.destroyAllWindows()
def create(a, b):
    import cv2

    from Name import name
    user = name()
    filename = 'write_data.txt'
    with open(filename, 'a') as f:
        #id = input("Please input an id: ")
        #Sname = input("please input a name: ")
        id = a
        user.id.append(a)
        user.name.append(b)
        user.index = user.index + 1
        f.write((str)(user.id[user.index - 1]) + " " +
                user.name[user.index - 1])
        f.write("\n")

    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cam = cv2.VideoCapture(0)
    sampleNum = 0
    while (True):
        ret, img = cam.read()
        if not ret: continue
        if len(img.shape) == 3 or len(img.shape) == 4:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.imwrite(
                "dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg",
                gray[y:y + h, x:x + w])
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.waitKey(100)
        cv2.imshow("Face", img)
        cv2.waitKey(1)
        if (sampleNum > 100):
            break
    cv2.destroyAllWindows()
    cam.release()
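None of the examples show the training step that produces `recognizer/trainningData.yml`; they only load it. A minimal sketch of that step, assuming the flat `dataSet/User.<id>.<n>.jpg` layout written by `create()` above and the same legacy `cv2.createLBPHFaceRecognizer()` API used throughout these examples (the function name, paths, and filename parsing are assumptions):

import os

import cv2
import numpy as np

def train_recognizer(data_dir='dataSet', out_path='recognizer/trainningData.yml'):
    # Gather every "User.<id>.<n>.jpg" sample saved by create() and train
    # an LBPH model on the grayscale crops.
    samples, labels = [], []
    for fname in os.listdir(data_dir):
        if not (fname.startswith('User.') and fname.endswith('.jpg')):
            continue
        label = int(fname.split('.')[1])  # the numeric user id
        img = cv2.imread(os.path.join(data_dir, fname), cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        samples.append(img)
        labels.append(label)
    rec = cv2.createLBPHFaceRecognizer()  # legacy OpenCV 2.4 / 3.0 contrib API
    rec.train(samples, np.array(labels))
    if not os.path.isdir(os.path.dirname(out_path)):
        os.makedirs(os.path.dirname(out_path))
    rec.save(out_path)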
Example #5
#-*-coding:utf8-*-
import cv2
import numpy as np

from Name import name
user = name()
with open('write_data.txt') as f:
    for line1 in f:
        info = line1.split()
        user.id.append(info[0])
        user.name.append(info[1])

faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
rec = cv2.createLBPHFaceRecognizer()
rec.load(
    '/home/zsf/桌面/FaceRecognizer-master (3)/faceRecogination/recognizer/trainningData.yml'
)
id = 0
font = cv2.FONT_HERSHEY_SIMPLEX
while (True):
    ret, img = cam.read()
    if not ret: continue
    if len(img.shape) == 3 or len(img.shape) == 4:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)