# Example #1
def init(kfn, kfe):
    """Run the live recognition loop against the default camera.

    Args:
        kfn: list of known face names; mutated in place as unknown faces
             are auto-enrolled as 'SuspectN'.
        kfe: list of known face encodings, parallel to kfn; mutated in place.

    Side effects:
        - writes every captured frame to a video log ('output.avi');
        - saves cropped unknown faces under 'unknownDetected/';
        - prints recognition events / security alerts to the console;
        - shows the live feed in a 'Video' window; 'q' quits, 'e'/'r' enrolls.
    """
    names = []         # per-frame log of the last recognised name
    flag = True        # process only every other frame (halves CPU load)
    floc = []          # face locations from the last processed frame
    fe = []            # face encodings from the last processed frame
    ctr = 0            # counter used to name saved unknown-face crops
    name = " "
    prev_name = " "    # last printed name, to avoid repeating console lines

    print('Press " e " | " r " to enroll..')

    video = cv2.VideoCapture(0)  # start cam

    width = int(video.get(3))    # CAP_PROP_FRAME_WIDTH
    height = int(video.get(4))   # CAP_PROP_FRAME_HEIGHT
    vname = 'output.avi'

    out = cv2.VideoWriter(vname, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (width, height))

    while True:
        ret, frame = video.read()  # grab frame by frame
        if not ret:
            # BUG FIX: camera unplugged / read failure left frame as None and
            # crashed in cv2.resize; stop cleanly instead.
            break

        # Downscale purely for speed; recognition runs on the small frame.
        rframe = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # cv2 uses BGR whereas face_rec expects RGB, so reverse the channels.
        rgbrframe = cv2.cvtColor(rframe, cv2.COLOR_BGR2RGB)

        out.write(frame)  # write to video log

        if flag:
            floc = face_rec.face_locations(rgbrframe)       # face boxes in frame
            fe = face_rec.face_encodings(rgbrframe, floc)   # encodings for each box

            for fenc in fe:
                matched_faces = face_rec.compare_faces(kfe, fenc)
                fdist = face_rec.face_distance(kfe, fenc)
                best_match = np.argmin(fdist)  # closest known face
                if matched_faces[best_match]:
                    try:
                        name = kfn[best_match]
                        if prev_name != name:
                            print(name + ' - '+str(datetime.datetime.now()))
                        prev_name = name
                    except Exception:
                        pass
                else:
                    name = 'Unknown'
                    if prev_name != name:
                        print('\t!!! Security Alert !!!\n\t\tDetected ' + name + '- ' + str(datetime.datetime.now()))
                    prev_name = name
                    # Re-detect with Haar cascade on a gray frame to get a
                    # croppable box for the unknown face.
                    gray = cv2.cvtColor(rframe, cv2.COLOR_BGR2GRAY)
                    faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
                    faces = faceCascade.detectMultiScale(gray,
                            scaleFactor=1.3,
                            minNeighbors=3,
                            minSize=(50, 50)
                    )
                    for (x, y, w, h) in faces:
                        roi_color = rframe[y : y + 240, x : x + 360]  # crop unknown face
                        ctr = ctr + 1
                        path = 'unknownDetected/'
                        filename = os.path.join(path, str(ctr) + 'Unknown_Face.jpg')
                        cv2.imwrite(filename, roi_color)  # save crop for the record
                        f1 = face_rec.load_image_file(filename)
                        try:
                            f1_encoding = face_rec.face_encodings(f1)[0]
                        except IndexError:
                            # BUG FIX: the original `except: pass` fell through
                            # and appended a stale (or undefined) encoding,
                            # corrupting the kfe/kfn parallel lists.
                            continue
                        # Auto-enroll the unknown face as a numbered suspect.
                        kfe.append(f1_encoding)
                        kfn.append('Suspect' + str(ctr))

        # BUG FIX: original line `r1 = r.randint(0,100):` had a stray colon
        # (SyntaxError), and `r1 / 2 == 0` is true only for r1 == 0; use
        # modulo so roughly half the frames are sampled, as intended.
        r1 = r.randint(0, 100)
        if r1 % 2 == 0:
            s.sframes(name, frame)
        flag = not flag  # toggle: skip recognition on the next frame
        names += [name]
        cv2.imshow('Video', frame)  # show frames as being processed

        print('Here', *names)
        # No markings are done on live frames so as to reduce flickering on
        # the screen/video feed; results are shown on the terminal instead.

        # TODO: show on a text area while adding the Flask front-end.

        # BUG FIX: read the key ONCE per iteration; the original called
        # cv2.waitKey(1) three times, so key presses were frequently consumed
        # by the wrong check and dropped.
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            out.release()
            video.release()
            cv2.destroyAllWindows()
            break

        if key == ord('e') or key == ord('r'):
            print('Begin Enrollment....')
            out.release()  # BUG FIX: the video log writer was left open here
            video.release()
            cv2.destroyAllWindows()
            e.enroll()
            s.store(names)
            # BUG FIX: original called init() with no arguments, which raises
            # TypeError (the signature requires kfn and kfe), and then fell
            # back into the loop with a released camera.
            init(kfn, kfe)
            return

    # NOTE: ~30 lines of unreachable code that followed the original
    # `return` (a duplicate of the recognition/drawing logic) were removed.
    return
def findout(image, iname):
    """Track one known face over an RTSP phone-camera stream.

    Args:
        image: path to a sample picture of the person to find.
        iname: label to draw on the video for that person.

    Side effects: opens a 'Cam_feed' window showing the stream at half size
    with green boxes and name labels on recognised faces; press 'q' to quit.
    Prints the number of processed frames on exit.
    """
    print(image)
    url = 'rtsp://192.168.43.1:8080/h264_ulaw.sdp'
    # url = 'http://192.168.43.1:8080/video'
    video = cv2.VideoCapture(url)

    # Learn the single reference face from the sample picture.
    f1 = face_rec.load_image_file(image)
    f1_encoding = face_rec.face_encodings(f1)[0]

    kfe = [f1_encoding]  # known encodings
    kfn = [iname]        # known names, parallel to kfe

    names = []
    flag = True   # recognise only every other frame (speed)
    floc = []     # face locations from the last processed frame
    fe = []
    count = 0     # processed-frame counter, reported on exit
    while True:
        ret, frame = video.read()  # grab frame by frame
        if not ret:
            # BUG FIX: RTSP streams drop routinely; the original passed a
            # None frame straight into cv2.resize and crashed.
            break

        rframe = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # speed only

        # cv2 uses BGR whereas face_rec expects RGB, so reverse the channels.
        rgbrframe = cv2.cvtColor(rframe, cv2.COLOR_BGR2RGB)

        if flag:
            floc = face_rec.face_locations(rgbrframe)      # face boxes in frame
            fe = face_rec.face_encodings(rgbrframe, floc)  # encodings per box

            names = []
            for fenc in fe:
                matched_faces = face_rec.compare_faces(kfe, fenc)
                name = 'Unknown'

                fdist = face_rec.face_distance(kfe, fenc)
                best_match = np.argmin(fdist)  # closest known face
                if matched_faces[best_match]:
                    name = kfn[best_match]

                names.append(name)
        flag = not flag  # toggle: skip recognition on the next frame

        # Draw the results; locations came from the 0.25x frame, so scale
        # coordinates back up by 4 for the full-size frame.
        for (top, right, bottom, left), name in zip(floc, names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                          4)  # box around the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 0), cv2.FILLED)  # label background strip
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)  # label the face

        # Show the annotated frame at half size.
        h, w, l = frame.shape
        rzframe = cv2.resize(frame, (int(w / 2), int(h / 2)))
        cv2.imshow('Cam_feed', rzframe)
        count += 1

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # BUG FIX: release resources on every exit path (stream failure too),
    # not only when the user presses 'q'.
    print('processed ', count, 'frames')
    video.release()
    cv2.destroyAllWindows()