Code Example #1
File: align_faces.py  Project: skyli42/RiceKrispies
# imports required by this snippet
import uuid

import cv2
import dlib
import imutils
from imutils.face_utils import FaceAligner, rect_to_bb


def ff(img):
    """
    :param img:
    :return: faceAligned[0] the aligned face version of the original input image
    """
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor and the face aligner
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(str(_path) + "/shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceWidth=256)

    # load the input image, resize it, and convert it to grayscale
    image = img
    image = imutils.resize(image, width=800)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # show the original input image and detect faces in the grayscale
    # image
    #cv2.imshow("Input", image)
    rects = detector(gray, 2)

    lst = []
    # loop over the face detections
    for rect in rects:
        # extract the ROI of the *original* face, then align the face
        # using facial landmarks
        (x, y, w, h) = rect_to_bb(rect)
        faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)
        faceAligned = fa.align(image, gray, rect)

        # save each aligned face under a random file name
        f = str(uuid.uuid4())
        cv2.imwrite("foo/" + f + ".png", faceAligned)

        # display the output images
        #cv2.imshow("Original", faceOrig)
        #cv2.imshow("Aligned", faceAligned)
        #cv2.waitKey(0)
        lst.append(faceAligned)

    # guard against frames where no face was detected
    if not lst:
        return None
    return lst[0]
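A minimal way to exercise ff(), assuming an image file on disk; "sample.jpg" is a placeholder path, not from the source:

# hypothetical usage sketch for ff()
import cv2

img = cv2.imread("sample.jpg")
aligned = ff(img)
if aligned is not None:
    cv2.imshow("Aligned", aligned)
    cv2.waitKey(0)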
Code Example #2
        if not is_exist_user:
            user_data.append(entry)

        user_data = sorted(user_data, key=lambda user_key: user_key['id'], reverse=False)
        print("[DEBUG] user_date with index = {}".format(user_data))

        with open(user_data_file, mode='w') as output_file:
            output_file.write(json.dumps(user_data, indent=4))

        break

    # if the `s` key was pressed save the first found face
    if key == ord('s'):
        if len(faces) > 0:
            face_aligned = fa.align(frame, gray_frame, faces[0])
            image_name = "{}{}.png".format(base_dir, str(datetime.datetime.now()).replace(" ", "_").replace(":", "."))
            print("[DEBUG] image_name = {}".format(image_name))
            # save image
            cv2.imwrite(image_name, face_aligned)
            # show image
            cv2.imshow(image_name, face_aligned)

    # loop over the face detections
    for face in faces:
        (x, y, w, h) = face_utils.rect_to_bb(face)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # show the frame
    cv2.imshow("Face Detection Window - S: Save/Capture, Q: Quit", frame)
Code Example #3
def test():
    # get dlib's default frontal face detector
    detector = dlib.get_frontal_face_detector()
    # load the 68-landmark model via shape_predictor; it locates facial feature points
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # face alignment
    fa = FaceAligner(predictor, desiredFaceWidth=256)
    # load the face recognition (ResNet descriptor) model
    facerec = dlib.face_recognition_model_v1(
        "dlib_face_recognition_resnet_model_v1.dat")
    # list of face descriptors to compare against
    descriptors = []
    # list of candidate face names
    candidate = []
    # open the webcam
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # folder holding the reference face images
    faces_folder_path = "./rec"
    WHO = None
    Erro = 0

    # photos already processed are cached to a file for direct reuse; only new images get added

    # number of files in the folder
    NUM_OF_FILES = 0  # file count
    for fn in os.listdir(faces_folder_path):
        NUM_OF_FILES += 1
    # print(NUM_OF_FILES)

    person = [0 for i in range(NUM_OF_FILES)]
    # print(person)

    # count recorded previously
    f = open("numOfFiles.txt", encoding="utf-8")
    num_o = f.read()
    # print(type(num_o))
    num_o = int(num_o)
    # print(type(num_o))
    # print(num_o)

    if num_o != NUM_OF_FILES:
        # write the new count back to the txt file
        num = open('numOfFiles.txt', 'w', encoding="utf-8")
        num.write(str(NUM_OF_FILES))
        num.close()

        # process the new images
        # read each image and its file name (the person's name) from the folder, storing every image's 128-D feature vector in the descriptors list
        for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
            base = os.path.basename(f)
            # collect each image file's person name into the candidate list
            candidate.append(os.path.splitext(base)[0])
            img = io.imread(f)
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            # 1. face detection
            face_rects = detector(img, 0)
            # print(face_rects)
            for index, face in enumerate(face_rects):
                # 2. face alignment
                faceAligned = fa.align(img, gray, face)
                # 3. detect again on the aligned face (trims the excess left over after alignment)
                face_rects2 = detector(faceAligned, 1)
                for index2, face2 in enumerate(face_rects2):
                    ax1 = face2.left()
                    ay1 = face2.top()
                    ax2 = face2.right()
                    ay2 = face2.bottom()
                    # 4. detect the 68 landmarks
                    shape = predictor(faceAligned, face2)
                    # 5. compute the descriptor, a 128-D feature vector
                    face_descriptor = facerec.compute_face_descriptor(
                        faceAligned, shape)
                    # convert to numpy array format
                    v = np.array(face_descriptor)
                    descriptors.append(v)
                    # print(face_rects)
        # append the new descriptors to the original txt file
        np.savetxt('desc_txt.txt', descriptors)
        c = open("cand.txt", "w", encoding="utf-8")
        str_list = [line + '\n' for line in candidate]
        c.writelines(str_list)
        c.close()

    # loop over frames from the camera and display them
    while cap.isOpened():
        desc_list = np.loadtxt("./desc_txt.txt")
        k = open("cand.txt", encoding="utf-8")
        can = k.readlines()
        x1 = 0
        x2 = 0
        y1 = 0
        y2 = 0
        # grab a frame from the webcam
        ret, frame = cap.read()
        # shrink the image
        frame = imutils.resize(frame, width=800)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # 1. face detection
        face_rects = detector(frame, 1)
        # iterate over every detection (all face coordinates)
        for index, rect in enumerate(face_rects):
            x1 = rect.left()
            y1 = rect.top()
            x2 = rect.right()
            y2 = rect.bottom()
            # mark the detected face with a box
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4,
                          cv2.LINE_AA)
            # 2. face alignment
            faceAligned = fa.align(frame, gray, rect)
            # 3. detect again on the aligned face (trims the excess left over after alignment)
            face_rects2 = detector(faceAligned, 1)
            # iterate over every detection (all face coordinates)
            for index2, rect2 in enumerate(face_rects2):
                ax1 = rect2.left()
                ay1 = rect2.top()
                ax2 = rect2.right()
                ay2 = rect2.bottom()
                # 4. detect the 68 landmarks
                shape = predictor(faceAligned, rect2)
                # 5. compute the descriptor, a 128-D feature vector
                face_descriptor = facerec.compute_face_descriptor(
                    faceAligned, shape)
                # convert to numpy array format
                d_test = np.array(face_descriptor)
                # compute the Euclidean distance to each face in the library (5 photos give 5 distances)
                # reset the list holding the face distances
                dist = []
                for index in desc_list:
                    # compute the distance
                    dist_ = np.linalg.norm(index - d_test)
                    # append it to the list
                    dist.append(dist_)
                # print(dist)
                # identify the person
                if dist != []:
                    # zip the candidate names with their Euclidean distances into a dict
                    c_d = dict(zip(can, dist))
                    # sort ascending by Euclidean distance, giving a list of ("name", distance) pairs
                    cd_sorted = sorted(c_d.items(), key=lambda d: d[1])
                    # print(cd_sorted)
                    # a smaller Euclidean distance (0-1) means more similar; 0.4 is the acceptance threshold here
                    if cd_sorted[0][1] < 0.4:
                        rec_name = cd_sorted[0][0]
                    else:
                        rec_name = "No Data"
                        Erro = Erro + 1
                        if Erro == 6:
                            WHO = "No Data"
                            cap.release()
                            cv2.destroyAllWindows()
                            break

                imgPil = Image.fromarray(frame)
                font = ImageFont.truetype("C:/Windows/Fonts/msjh.ttc", 20)
                draw = ImageDraw.Draw(imgPil)
                draw.fontmode = '1'  # disable anti-aliasing
                draw.text((x1, y1 - 20),
                          rec_name,
                          font=font,
                          fill=(255, 255, 255))
                frame = np.array(imgPil)

                for index, name in enumerate(can):
                    if rec_name == name:
                        person[index] = person[index] + 1
                        # print(person)
                        if person[index] == 4:
                            WHO = can[index]
                            # label the recognized person's name (supports Chinese)
                            if WHO is not None:
                                # imgPil.show()
                                print(WHO)
                                time.sleep(5)
                                cap.release()
                                cv2.destroyAllWindows()
                                return WHO
                            break
                    elif sum(person) > 8:
                        WHO = "No Data"
                        cap.release()
                        cv2.destroyAllWindows()
                        break

            # label the recognized name with cv2 (English only)
            # cv2.putText(frame, rec_name, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        # OpenCV uses BGR
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        """
        # FPS
        # Find OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    
        if int(major_ver) < 3:
            fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
            print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
        else:
            fps = cap.get(cv2.CAP_PROP_FPS)
            print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
        """
        # show the result
        cv2.imshow("Face Detection", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
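The matching core of the example above reduces to comparing 128-D descriptors by Euclidean distance against a threshold. A standalone sketch of just that step (the function and argument names are illustrative, not from the source):

import numpy as np

def match_descriptor(d_test, known_descriptors, known_names, threshold=0.4):
    # Euclidean distance to every enrolled descriptor; smaller means more similar
    dists = [np.linalg.norm(d - d_test) for d in known_descriptors]
    best = int(np.argmin(dists))
    return known_names[best] if dists[best] < threshold else "No Data"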
Code Example #4
count = 0

while number_of_images < MAX_NUMBER_OF_IMAGES:
    ret, frame = video_capture.read()

    frame = cv2.flip(frame, 1)

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    faces = detector(frame_gray)
    if len(faces) == 1:
        face = faces[0]
        (x, y, w, h) = face_utils.rect_to_bb(face)
        face_img = frame_gray[y - 50:y + h + 100, x - 50:x + w + 100]
        face_aligned = face_aligner.align(frame, frame_gray, face)

        if count == 5:
            cv2.imwrite(
                os.path.join(directory,
                             str(name + "_" + str(number_of_images) + '.jpg')),
                face_aligned)
            number_of_images += 1
            count = 0
        print(count)
        count += 1

    cv2.imshow('Video', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
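The loop above relies on names defined before the excerpt; a plausible setup under those assumptions (all values are placeholders, not from the source):

# assumed setup for the capture loop above; names and values are placeholders
import os

import cv2
import dlib
from imutils import face_utils
from imutils.face_utils import FaceAligner

MAX_NUMBER_OF_IMAGES = 50
name = "person"
directory = "dataset"
os.makedirs(directory, exist_ok=True)

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_aligner = FaceAligner(predictor, desiredFaceWidth=256)
video_capture = cv2.VideoCapture(0)
number_of_images = 0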
Code Example #5
    right = bb[0] + bb[2]
    bottom = bb[1] + bb[3]
    return np.array([top, right, bottom, left])


detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    rects = detector(gray, 2)
    for rect in rects:
        (x, y, w, h) = rect_to_bb(rect)
        faceOrig = imutils.resize(frame[y:y + h, x:x + w], width=256)
        faceAligned = fa.align(frame, gray, rect)
        cv2.imshow("Original", faceOrig)
        cv2.imshow("Aligned", faceAligned)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Code Example #6
def create_dataset(username):
    id = username
    if not os.path.exists(
            'face_recognition_data/training_dataset/{}/'.format(id)):
        os.makedirs('face_recognition_data/training_dataset/{}/'.format(id))
    directory = 'face_recognition_data/training_dataset/{}/'.format(id)

    # Detect face
    # Loading the HOG face detector and the shape predictor for alignment

    print("[INFO] Loading the facial detector")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        'face_recognition_data/shape_predictor_68_face_landmarks.dat'
    )  #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
    fa = FaceAligner(predictor, desiredFaceWidth=96)
    #capture images from the webcam and process and detect the face
    # Initialize the video stream
    print("[INFO] Initializing Video stream")
    vs = VideoStream(src=0).start()
    #time.sleep(2.0) ####CHECK######

    # Our identifier
    # We will put the id here and we will store the id with a face, so that later we can identify whose face it is

    # Our dataset naming counter
    sampleNum = 0
    # Capturing the faces one by one and detect the faces and showing it on the window
    while True:
        # Capturing the image
        #vs.read each frame
        frame = vs.read()
        #Resize each image
        frame = imutils.resize(frame, width=800)
        # the captured frame is color, but the detector works on grayscale,
        # so convert it
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #To store the faces
        #This will detect all the images in the current frame, and it will return the coordinates of the faces
        #Takes in image and some other parameter for accurate result
        faces = detector(gray_frame, 0)
        # 'faces' may contain multiple detections, so handle each one and draw a rectangle around it

        for face in faces:
            if face is None:
                print("face is none")
                continue

            print("inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)

            face_aligned = fa.align(frame, gray_frame, face)
            # Whenever the program captures a face, we write it to the user's folder
            # Before capturing the face, we need to tell the script whose face it is
            # For that we need an identifier, here we call it id
            # So now that a face is captured, we write it to a file
            sampleNum = sampleNum + 1
            # Saving the image dataset, but only the face part, cropping the rest

            cv2.imwrite(directory + '/' + str(sampleNum) + '.jpg',
                        face_aligned)
            face_aligned = imutils.resize(face_aligned, width=400)
            #cv2.imshow("Image Captured",face_aligned)
            # @params the initial point of the rectangle will be x,y and
            # @params end point will be x+width and y+height
            # @params along with color of the rectangle
            # @params thickness of the rectangle
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            # Before continuing to the next loop, give it a little pause:
            # waitKey of 50 milliseconds
            cv2.waitKey(50)

        #Showing the image in another window
        #Creates a window with window name "Face" and with the image img
        cv2.imshow("Add Images", frame)
        #Before closing it we need to give a wait command, otherwise OpenCV won't render
        # @params delay of 1 millisecond
        cv2.waitKey(1)
        #To get out of the loop
        if (sampleNum > 300):
            break

    #Stopping the videostream
    vs.stop()
    # destroying all the windows
    cv2.destroyAllWindows()
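Typical usage would be a single call; the username (hypothetical here) becomes the training-dataset folder name, and capture stops after roughly 300 samples:

# hypothetical usage; captures aligned face crops for one user
create_dataset("john_doe")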
Code Example #7
def register_yourself(student_id, frame_num, image_num, id_idx):

    os.makedirs(DATA_PATH, exist_ok=True)

    #Loading the stored face encodings and corresponding IDs
    try:
        with open(os.path.join(STORAGE_PATH, "known_face_ids.pickle"),
                  "rb") as fp:
            known_face_ids = pickle.load(fp)
        with open(os.path.join(STORAGE_PATH, "known_face_encodings.pickle"),
                  "rb") as fp:
            known_face_encodings = pickle.load(fp)
    except (OSError, EOFError):
        known_face_encodings = []
        known_face_ids = []

    mpl.rcParams['toolbar'] = 'None'

    print("[INFO] Loading Face Detector")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(LANDMARK_PATH)
    fa = FaceAligner(predictor, desiredFaceWidth=96)

    print("[INFO] Initializing Video stream")
    vs = cv2.VideoCapture(0)  #,cv2.CAP_DSHOW)

    #Uncomment below to create the folder for the images
    '''
    IMAGE_PATH = os.path.join(DATA_PATH, student_id)
    try:
        os.makedirs(IMAGE_PATH)
    except:
        pass
    '''
    #Entry time
    tin = time.time()

    #frame = vs.read()
    #fig = plt.figure()
    #plot = plt.subplot(1,1,1)
    #plt.title("Detecting Face")
    #plt.axis('off')
    #im1 = plot.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    while image_num < 10:  # Take 10 images

        _, frame = vs.read()
        frame_num += 1

        #Resize each image
        #frame = cv2.resize(frame ,(600,600))

        #Applying face enhancement steps
        frame = imageEnhancement.adjust_gamma(frame, gamma=1.5)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #Detecting faces in the frame
        faces = detector(gray_frame, 0)

        for face in faces:

            if face is None:
                print("face is none")
                continue

            #Capture the face and align it using the face aligner
            (x, y, w, h) = rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            face_aligned = cv2.resize(face_aligned, (600, 600))

            # @params the initial point of the rectangle will be x,y and
            # @params end point will be x+width and y+height
            # @params along with color of the rectangle
            # @params thickness of the rectangle
            #Put a bounding box over detected face
            frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  1)

            #cv2.imshow("Image Captured",frame)
            #cv2.waitKey(50)

        #plt.ion()
        #im1.set_data(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        #plt.pause(0.001)
        #plt.show()

        if (frame_num % 30 == 0):

            #Uncomment the line below to store the face images
            #cv2.imwrite(IMAGE_PATH + "/{}_".format(student_id) + str(j) + ".jpg", face_aligned)

            #Appending the face encodings and corresponding IDs
            try:
                known_face_encodings.append(
                    face_recognition.face_encodings(frame)[0])
                known_face_ids.append(student_id)
            except IndexError:  # no face encoding found in this frame
                continue
            image_num += 1

        #OpenCV's implementation to show an image in window(doesn't work on production server)
        #cv2.imshow("Capturing Images for registration (PRESS Q TO QUIT",frame)

        #Encoding the frame to be stream into browser
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

        #if(cv2.waitKey(1) == ord("q")):
        #    break

    #Storing the face encodings and corresponding IDs to disk
    with open(os.path.join(STORAGE_PATH, "known_face_ids.pickle"), "wb") as fp:
        pickle.dump(known_face_ids, fp)
    with open(os.path.join(STORAGE_PATH, "known_face_encodings.pickle"),
              "wb") as fp:
        pickle.dump(known_face_encodings, fp)

    #Noting the number of pictures already captured and storing the index
    id_idx[student_id] = image_num
    with open(os.path.join(STORAGE_PATH, "id_idx.json"), "w") as outfile:
        json.dump(id_idx, outfile)

    #Exit time
    tout = time.time()
    print(tout - tin)

    #plt.close()

    #Releasing the videostream
    vs.release()
    cv2.destroyAllWindows()
    return True
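Since register_yourself yields multipart JPEG chunks, it is presumably consumed by a streaming HTTP response. A minimal Flask sketch of that wiring; the route name and app setup are assumptions, not from the source:

# hypothetical Flask wiring for the generator above
from flask import Flask, Response

app = Flask(__name__)

@app.route("/register/<student_id>")
def register_feed(student_id):
    gen = register_yourself(student_id, frame_num=0, image_num=0, id_idx={})
    return Response(gen, mimetype="multipart/x-mixed-replace; boundary=frame")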
Code Example #8
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("facerec.dat")
fa = FaceAligner(predictor, desiredFaceWidth=256)

#Load Input
al_image = cv2.imread(args["image"])
al_image = imutils.resize(al_image, width=800)
al_gray = cv2.cvtColor(al_image, cv2.COLOR_BGR2GRAY)
al_rects = detector(al_gray, 2)

# loop over the face detections
for rect in al_rects:

    #Format Conversion DLIB and openCV, store new image in faceAligned
    (x, y, w, h) = rect_to_bb(rect)
    faceAligned = fa.align(al_image, al_gray, rect)

#now that we have the aligned image (from the last detection), let's get our facial landmarks
image = faceAligned
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)

#Getting the right region for the face
for (i, rect) in enumerate(rects):
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    #Looping over the coordinates for the facial landmarks and display them on image
    for (x, y) in shape:
        cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
Code Example #9
            M[1, 2] += (tY - eyesCenter[1])
            # apply the affine transformation
            (w, h) = (self.desiredFaceWidth, self.desiredFaceHeight)
            output = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC)
            print(img_name + " was aligned.")
            cv2.imwrite('../data/aligned/' + 'aligned_' + str(c) + img_name,
                        output)
        return
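The matrix M used above is built earlier in align(); in imutils' standard FaceAligner (which this custom class appears to mirror) it is derived from the eye centers roughly as follows. This is a sketch under that assumption, not the project's verbatim code:

import cv2
import numpy as np

def eyes_rotation_matrix(leftEyeCenter, rightEyeCenter,
                         desiredLeftEye=(0.35, 0.35),
                         desiredFaceWidth=256, desiredFaceHeight=256):
    # angle of the line between the eyes
    dY = rightEyeCenter[1] - leftEyeCenter[1]
    dX = rightEyeCenter[0] - leftEyeCenter[0]
    angle = np.degrees(np.arctan2(dY, dX)) - 180
    # scale so the inter-eye distance matches the desired output geometry
    desiredRightEyeX = 1.0 - desiredLeftEye[0]
    dist = np.sqrt(dX ** 2 + dY ** 2)
    desiredDist = (desiredRightEyeX - desiredLeftEye[0]) * desiredFaceWidth
    scale = desiredDist / dist
    # rotate/scale about the midpoint between the eyes, then translate it,
    # which is exactly what the M[0, 2] / M[1, 2] updates in the snippet do
    eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) / 2.0,
                  (leftEyeCenter[1] + rightEyeCenter[1]) / 2.0)
    M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
    tX = desiredFaceWidth * 0.5
    tY = desiredFaceHeight * desiredLeftEye[1]
    M[0, 2] += (tX - eyesCenter[0])
    M[1, 2] += (tY - eyesCenter[1])
    return M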


fa = FaceAligner(desiredFaceWidth=256)


def wrap(row):
    fa.align(row['urls'], row['face_coords'])


if len(sys.argv) == 1:
    path = '../data/images/'
    files = os.listdir(path)
    for img_name in files:
        # align() writes the aligned image into ../data/aligned/ itself (see above)
        fa.align(img_name)
else:
    data = pd.read_csv(sys.argv[1], encoding='utf-8')
    clusters = data['cluster']
    imgs = data.apply(wrap, axis=1)
Code Example #10
        # compute the bounding box of the face and draw it on the
        # frame

        (bX, bY, bW, bH) = face_utils.rect_to_bb(rect)
        cv2.rectangle(frame, (bX, bY), (bX + bW, bY + bH), (0, 255, 0), 1)
        #cropped_image = frame[bX:bX+5*bH,bY:bY+5*bW]
        #cv2.namedWindow("cropped image",2)
        #cv2.imshow("cropped image",cropped_image)

        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray_frame, rect)
        shape = face_utils.shape_to_np(shape)
        if cv2.waitKey(1) & 0xFF == ord('c'):
            faceAligned = fa.align(dup_frame, gray_frame, rect)
            capture = "image_" + str(randint(0, 125)) + "_" + str(
                randint(0, 125))
            cv2.putText(frame, str("Capturing Image"), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 4)
            cv2.imwrite(
                "C:/Users/rai_a/PycharmProjects/final_year_face_recogn/aligned_image_data/"
                + capture + ".jpg", faceAligned)
            print("Capturing in process", end="", flush=True)
            time.sleep(2)
            print("\nCapture Complete")
            cv2.putText(frame, str("Capture Complete, Starting Processing"),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                        (255, 255, 255), 4)

            cv2.destroyAllWindows()
Code Example #11
File: demo.py  Project: ywadea/CMPT726
def main():
    global Result, waitting, current_face
    threads = []
    waitting = False  # whether frames have been handed off to the TF model
    period = 0

    args = get_args()
    depth = args.depth
    k = args.width

    # for face detection
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceWidth=200)

    # load model and weights
    img_size = 200

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    while True:
        # get video frame
        ret, img = cap.read()

        if not ret:
            print("error: failed to capture image")
            return -1

        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_img2 = input_img.astype(np.float32)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))

        for i, d in enumerate(detected):
            x1, y1, x2, y2, w, h = d.left(), d.top(
            ), d.right() + 1, d.bottom() + 1, d.width(), d.height()
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), img_w - 1)
            yw2 = min(int(y2 + 0.4 * h), img_h - 1)
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
            faces[i, :, :, :] = fa.align(input_img, gray, detected[i])
            # faces[i,:,:,:] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
            # add = 'tmp_{}.jpg'.format(i)
            #

            #cv2.imwrite('tmp_{}.jpg'.format(i), faces, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

        if len(detected) > 0:
            period += 1
            if not waitting:
                race = np.zeros(len(detected))
                gender = np.zeros(len(detected))
            if not waitting and period == 10:  # first time a person comes to the camera
                period = 0
                waitting = True
                # image = align_face(img)
                # predict ages and genders of the detected faces
                for i, d in enumerate(detected):

                    # gender[i], race[i] = predictor(faces[i, :, :, :])
                    cv2.imwrite('tmp_{}.jpg'.format(i), faces[i, :, :, :],
                                [int(cv2.IMWRITE_JPEG_QUALITY), 180])
                    # cv2.imwrite('tmp_{}.jpg'.format(i), image, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                    # cv2.imwrite('tmp_{}.jpg'.format(i), gray, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
                    # gender[i], race[i] = predictor2(img)

                    imgp = './tmp_{}.jpg'.format(i)
                    image_tru = load_image(imgp)
                    # predictor2(image_tru,  len(detected))
                    thread = threading.Thread(target=predictor2,
                                              args=(
                                                  image_tru,
                                                  len(detected),
                                              ))
                    # if thread.isAlive():
                    threads.append(thread)
                    print('thread created!')

                current_face = 0
                thread.start()

            if Result:
                gender[current_face] = Result[0] + 1
                race[current_face] = Result[1] + 1
                print(race, gender)
                current_face += 1
                Result = None

        if len(detected) == 0:
            waitting = False
            Result = None
            period = 0
            threads = []

        # draw results
        PRase = ['-', 'W', 'B', 'A', 'I', 'O']
        PGender = ['-', 'M', 'F']
        for i, d in enumerate(detected):
            label = "{}, {}".format(PRase[int(race[i])],
                                    PGender[int(gender[i])])
            draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        key = cv2.waitKey(1)

        if key == 27:
            break
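draw_label is not shown in this excerpt; a common implementation in webcam demos like this one draws a filled banner above the face. This is an assumed helper, possibly differing from the project's own:

# assumed implementation of draw_label; the real project's helper may differ
import cv2

def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=1.0, thickness=2):
    size = cv2.getTextSize(label, font, font_scale, thickness)[0]
    x, y = point
    # filled banner behind the text, then the label in white
    cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, (x, y), font, font_scale, (255, 255, 255), thickness)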
Code Example #12
File: demo.py  Project: nghiabka/Age_Gender_Estimator
def main(model):
    """

    """
    depth = 16
    k = 8
    # for face detection
    detector = dlib.get_frontal_face_detector()
    # tt = time.time()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    fa = FaceAligner(predictor, desiredFaceWidth=160)
    # print("time align: ", time.time() - tt)

    # print(fa)
    # print(type(fa))


    # load model and weights
    img_size = 160

    # capture video
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1140)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 980)

    start_time = time.time()

    count_frame = 0
    while True:
        # get video frame
        t1 = time.time()
        ret, img = cap.read()
        if not ret:
            print("error: failed to capture image")
            return -1
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        # print(detected)
        # print(type(detected))
        faces = np.empty((len(detected), img_size, img_size, 3))

        for i, d in enumerate(detected):

            x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
            xw1 = max(int(x1 - 0.4 * w), 0)
            yw1 = max(int(y1 - 0.4 * h), 0)
            xw2 = min(int(x2 + 0.4 * w), img_w - 1)
            yw2 = min(int(y2 + 0.4 * h), img_h - 1)

            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
            # print(img.shape)
            # cv2.imwrite("output/test_{}.jpg".format(i), img)
            faces[i, :, :, :] = fa.align(input_img, gray, detected[i])  
            # faces[i,:,:,:] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))

        # estimate age, gender
        if len(detected) > 0:
            gender_list = []
            age_list = []
            print("shape:", faces.shape)
            print("len: ", len(faces))
            faces = faces.astype('float32')/255
            gender_arr, age_arr = model.predict(faces)

            # predict gender
            for gen in gender_arr:
                gender_list.append(np.argmax(gen))
            # predict age
            for age in age_arr:
                age_list.append(np.argmax(age))

            # save_age.append(age_list)
            # save_gender.append(gender_list)
            t2 = time.time()
            print("gender: ",gender_list)
            print("age: ", age_list)
            print("time: ", t2-t1)
            
            time_ = (time.time()-start_time)
            if time_ > sec:
                path_out = "output"+"/{}.csv".format(date.today())
                if not os.path.exists(path_out):
                    gen_csv(path_out)
                # save_csv(gender_list, age_list, path_out)
                # print("dump....")
                start_time = time.time()
            count_frame_id = 0

            # draw results
            for i, d in enumerate(detected):
                __age = str(age_list[i])
                draw_age = label_age[__age]
                label = "{}, {}, ID={}".format(draw_age, "F" if gender_list[i] == 0 else "M", i)
                draw_label(img, (d.left(), d.top()), label)
        
        cv2.imshow("Output of predict", img)
        key = cv2.waitKey(1)

        if key == 27:
            break
Code Example #13
File: mainMake.py  Project: Ahmed-Dakrory/Proctoring
    def cameraCap(self):
        self.uId = self.userId.get()
        if self.uId != '':
            if self.outputLabel is not None:
                self.outputLabel.destroy()

            self.outputLabel = Label(self.framePic, text="Here We Start")
            self.outputLabel.config(font=("Courier", 44))
            self.outputLabel.place(x=400, y=100)

            mark_detector = MarkDetector()
            name = self.uId
            directory = os.path.join(facepath, name)

            if not os.path.exists(facepath):
                os.makedirs(facepath, exist_ok=True)

            if not os.path.exists(directory):
                try:
                    os.makedirs(directory, exist_ok=True)
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        print('invalid student id or access denied')
                        return

            poses = ['frontal', 'right', 'left', 'up', 'down']
            file = 0

            ret, sample_frame = self.cap.read()
            i = 0
            count = 0
            if not ret:
                return

            # Introduce pose estimator to solve pose. Get one frame to setup the
            # estimator according to the image size.
            height, width = sample_frame.shape[:2]
            pose_estimator = PoseEstimator(img_size=(height, width))

            # Introduce scalar stabilizers for pose.
            pose_stabilizers = [
                Stabilizer(state_num=2,
                           measure_num=1,
                           cov_process=0.1,
                           cov_measure=0.1) for _ in range(6)
            ]
            images_saved_per_pose = 0
            number_of_images = 0

            shape_predictor = dlib.shape_predictor(
                "shape_predictor_68_face_landmarks.dat")
            face_aligner = FaceAligner(shape_predictor,
                                       desiredFaceWidth=FACE_WIDTH)
            while i < 5:
                saveit = False
                # Read frame, crop it, flip it, suits your needs.
                ret, frame = self.cap.read()
                if not ret:
                    break
                if count % 5 != 0:  # process every 5th frame
                    count += 1
                    continue
                if images_saved_per_pose == IMAGE_PER_POSE:
                    i += 1
                    images_saved_per_pose = 0

                # If frame comes from webcam, flip it so it looks like a mirror.
                if file == 0:
                    frame = cv2.flip(frame, 2)
                original_frame = frame.copy()
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                facebox = mark_detector.extract_cnn_facebox(frame)

                if facebox is not None:
                    # Detect landmarks from image of 128x128.
                    x1 = max(facebox[0] - 0, 0)
                    x2 = min(facebox[2] + 0, width)
                    y1 = max(facebox[1] - 0, 0)
                    y2 = min(facebox[3] + 0, height)

                    face = frame[y1:y2, x1:x2]
                    face_img = cv2.resize(face,
                                          (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                    face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)

                    marks = mark_detector.detect_marks([face_img])

                    # Convert the marks locations from local CNN to global image.
                    marks *= (facebox[2] - facebox[0])
                    marks[:, 0] += facebox[0]
                    marks[:, 1] += facebox[1]

                    # Try pose estimation with 68 points.
                    pose = pose_estimator.solve_pose_by_68_points(marks)

                    # Stabilize the pose.
                    steady_pose = []
                    pose_np = np.array(pose).flatten()
                    for value, ps_stb in zip(pose_np, pose_stabilizers):
                        ps_stb.update([value])
                        steady_pose.append(ps_stb.state[0])
                    steady_pose = np.reshape(steady_pose, (-1, 3))

                    # print(steady_pose[0][0])
                    # if steady_pose[0][0]>0.1:
                    #     print('right')
                    # else:
                    #     if steady_pose[0][0]<-0.1:
                    #         print('left')
                    # if steady_pose[0][1]>0.1:
                    #     print('down')
                    # else:
                    #     if steady_pose[0][1]<-0.1:
                    #         print('up')
                    # print(steady_pose[0])
                    if i == 0:
                        if abs(steady_pose[0][0]) < ANGLE_THRESHOLD and abs(
                                steady_pose[0][1]) < ANGLE_THRESHOLD:
                            images_saved_per_pose += 1
                            saveit = True
                    if i == 1:
                        if steady_pose[0][0] > ANGLE_THRESHOLD:
                            images_saved_per_pose += 1
                            saveit = True
                    if i == 2:
                        if steady_pose[0][0] < -ANGLE_THRESHOLD:
                            images_saved_per_pose += 1
                            saveit = True
                    if i == 3:
                        if steady_pose[0][1] < -ANGLE_THRESHOLD:
                            images_saved_per_pose += 1
                            saveit = True
                    if i == 4:
                        if steady_pose[0][1] > ANGLE_THRESHOLD:
                            images_saved_per_pose += 1
                            saveit = True
                    # Show preview.
                    if i >= 5:
                        print('Thank you')
                        if self.outputLabel is not None:
                            self.outputLabel.destroy()

                        self.outputLabel = Label(self.framePic, text="Thanks")
                        self.outputLabel.config(font=("Courier", 44))
                        self.outputLabel.place(x=400, y=100)
                        break

                    frame = cv2.putText(
                        frame, poses[i] + ' : ' + str(images_saved_per_pose) +
                        '/' + str(IMAGE_PER_POSE), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 1,
                        cv2.LINE_AA)
                    frame = cv2.rectangle(frame, (x1, y1), (x2, y2),
                                          (255, 255, 0), 2)
                    frame = imutils.resize(frame, width=300, height=300)
                    # OpenCV represents images in BGR order; however PIL
                    # represents images in RGB order, so we need to swap
                    # the channels, then convert to PIL and ImageTk format
                    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    image = Image.fromarray(image)
                    image = ImageTk.PhotoImage(image)
                    # if the panel is not None, we need to initialize it
                    if self.panel is None:
                        self.panel = Label(self.framePic, image=image)
                        self.panel.image = image
                        self.panel.pack(side=LEFT, padx=0, pady=0)
                        print("Done")
                    # otherwise, simply update the panel
                    else:
                        self.panel.configure(image=image)
                        self.panel.image = image

                    if saveit:
                        face = dlib.rectangle(x1, y1, x2, y2)
                        face_aligned = face_aligner.align(
                            original_frame, frame_gray, face)
                        cv2.imwrite(
                            os.path.join(
                                directory,
                                str(name) + '_' + str(number_of_images) +
                                '.jpg'), face_aligned)
                        print(images_saved_per_pose)
                        number_of_images += 1

            self.cap.release()
        else:
            if self.outputLabel is not None:
                self.outputLabel.destroy()

            self.outputLabel = Label(self.framePic,
                                     text="Please Enter a Valid Id")
            self.outputLabel.config(font=("Courier", 44))
            self.outputLabel.place(x=400, y=100)
Code Example #14
File: app.py  Project: lykhahaha/Mine
def upload_file():
    file = request.files['image']

    image_path = os.path.sep.join([UPLOAD_FOLDER, file.filename])
    file.save(image_path)
    # image_url = uploader.upload(image_path)
    # image = AgeGenderHelper.url_to_image(image_url['url'])

    # initialize dlib's face detector (HOG-based), then create facial landmark predictor and face aligner
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(deploy.DLIB_LANDMARK_PATH)
    fa = FaceAligner(predictor)

    # initialize image preprocessors
    sp = SimplePreprocessor(256, 256, inter=cv2.INTER_CUBIC)
    cp = CropPreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE, horiz=False)
    iap = ImageToArrayPreprocessor()

    # loop over image paths
    # load image from disk, resize it and convert it to grayscale
    print(f'[INFO] processing {file.filename}')
    image = cv2.imread(image_path)
    image = imutils.resize(image, width=1024)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    clone = image.copy()

    # detect faces in grayscale image
    rects = detector(gray, 1)

    # loop over face detections
    for rect in rects:
        # determine facial landmarks for face region, then align face
        shape = predictor(gray, rect)
        face = fa.align(image, gray, rect)

        # draw bounding box around face
        x, y, w, h = face_utils.rect_to_bb(rect)
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if config.DATASET == 'IOG':
            # load Label Encoder and mean files
            print('[INFO] loading label encoders and mean files...')
            age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODER, 'rb').read())
            gender_le = pickle.loads(
                open(deploy.GENDER_LABEL_ENCODER, 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEAN).read())
            gender_means = json.loads(open(deploy.GENDER_MEAN).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'],
                                      age_means['B'])
            gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                                         gender_means['B'])

            age_preds, gender_preds = predict(face, sp, age_mp, gender_mp, cp,
                                              iap, deploy.AGE_NETWORK_PATH,
                                              deploy.GENDER_NETWORK_PATH,
                                              age_le, gender_le)

        elif config.DATASET == 'ADIENCE':
            # age_preds_cross, gender_preds_cross = [], []

            i = 0
            # load Label Encoder and mean files
            print(
                f'[INFO] loading label encoders and mean files for cross validation {i}...'
            )
            age_le = pickle.loads(
                open(deploy.AGE_LABEL_ENCODERS[i], 'rb').read())
            gender_le = pickle.loads(
                open(deploy.GENDER_LABEL_ENCODERS[i], 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEANS[i]).read())
            gender_means = json.loads(open(deploy.GENDER_MEANS[i]).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'],
                                      age_means['B'])
            gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                                         gender_means['B'])

            age_preds, gender_preds = predict(face, sp, age_mp, gender_mp, cp,
                                              iap, deploy.AGE_NETWORK_PATHS[i],
                                              deploy.GENDER_NETWORK_PATHS[i],
                                              age_le, gender_le)
            # age_preds_cross.append(age_pred)
            # gender_preds_cross.append(gender_pred)

            # age_preds, gender_preds = np.mean(age_preds_cross, axis = 0), np.mean(gender_preds_cross, axis = 0)

        clone = AgeGenderHelper.visualize_video(age_preds, gender_preds,
                                                age_le, gender_le, clone,
                                                (x, y))

    # path = image_path.split('.')
    # pred_path = '.'.join([f'{path[0]}_predict', path[1]])
    # pred_filename = pred_path.split(os.path.sep)[-1]
    pred_path = '.'.join([f"{image_path.split('.')[0]}_1", 'jpg'])
    cv2.imwrite(pred_path, clone)
    # image_url = uploader.upload(pred_path)
    gc.collect()
    K.clear_session()

    return render_template('index.html',
                           filename=pred_path.split(os.path.sep)[-1])
Code Example #15
                (startX, startY, endX, endY) = box.astype('int')
                face_locations.append((startX, startY, endX, endY))

        #print(len(face_locations))
        if len(face_locations) == 0:
            cv2.imshow('face', img)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break
            continue
            #cv2.destroyAllWindows()

        for i in range(len(face_locations)):
            (startX, startY, endX, endY) = face_locations[i]
            faceAligned = fa.align(img, img, dlib.rectangle(startX, startY, endX, endY))
            face_array = faceAligned[:, :, ::-1]  # BGR -> RGB for face_recognition
            #face_array = face_array[startY:endY, startX:endX]

            face_encode = face_recognition.face_encodings(face_array)
            face_encode = np.array(face_encode)
            #print(face_encode.shape)

            if face_encode.shape[0] == 0:
                cv2.rectangle(img, (startX, startY), (endX, endY),
                        (0, 0, 255), 2)
                #cv2.imshow('face', img)
                #key = cv2.waitKey(1) & 0xFF

                #if key == ord("q"):
                    #break
Code Example #16
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    height, width = frame.shape[:2]
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector(gray_frame, 0)

    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

    # if the `s` key was pressed save the first found face
    if key == ord('s'):
        if len(rects) > 0:
            faceAligned = fa.align(frame, gray_frame, rects[0])
            image_name = base_dir + str(count_image) + ".png"
            # save image
            cv2.imwrite(image_name, faceAligned)
            # show image
            cv2.imshow(image_name, faceAligned)
            count_image += 1
            # if count_image > 10:
            #     count_image = 0

    # loop over the face detections
    for rect in rects:
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 1)

    # show the frame
Code Example #17
        input(" Press enter to continue ! ")
    check.clear()  # reset the list


while True:
    ret, frame = cap.read()
    faceAlign = frame.copy()
    start = time.time()

    dets, scores, idx = detector.run(frame, 0, tolerance)

    for i, d in enumerate(dets):
        cv2.rectangle(frame, (d.left(), d.top()), (d.right(), d.bottom()),
                      (255, 0, 0), 2)  # box the face first
        faceAlign = fa.align(
            frame, frame,
            d)  # new photo: the original image cropped down to just the face, size 256 x 256
        dets, scores, idx = detector.run(faceAlign, 0,
                                         0)  # the new photo must be detected again
        for i1, d1 in enumerate(dets):
            shape = sp(faceAlign, d1)  # mark the 68 face landmarks
            face_descriptor = np.array(
                [facerec.compute_face_descriptor(faceAlign, shape)])
            prediction = cnn_model.predict_proba(face_descriptor)

            highest_proba = 0
            counter = 0
            # print prediction
            for prob in prediction[0]:
                if prob > highest_proba and prob >= 0.1:
                    highest_proba = prob
Code Example #18
def test():

    # get dlib's default frontal face detector
    detector = dlib.get_frontal_face_detector()
    # load the 68-landmark model via shape_predictor; it locates facial feature points
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # face alignment
    fa = FaceAligner(predictor, desiredFaceWidth=256)
    # load the face recognition (ResNet descriptor) model
    facerec = dlib.face_recognition_model_v1(
        "dlib_face_recognition_resnet_model_v1.dat")
    # list of face descriptors to compare against
    descriptors = []
    # list of candidate face names
    candidate = []
    # image paths
    path = []

    # play the voice prompts before opening the camera
    playsound('./sound/voice1_ch.mp3')
    playsound('./sound/voice1_en.mp3')
    playsound('./sound/voice1_j.mp3')
    # Play Music on Separate Thread (in background)
    # music_thread = Thread(target=play_music)
    # music_thread.start()

    # open the webcam
    cap = cv2.VideoCapture(0)
    # folder holding the reference face images
    faces_folder_path = "./rec"
    WHO = None
    Erro = 0
    # photos already processed are cached to a file for direct reuse; only new images get added

    # number of files in the folder
    NUM_OF_FILES = 0  # file count
    for fn in os.listdir(faces_folder_path):
        NUM_OF_FILES += 1
    #print(NUM_OF_FILES)

    person = [0 for i in range(NUM_OF_FILES)]
    #print(person)

    # count recorded previously
    f = open("numOfFiles.txt")
    num_o = f.read()
    num_o = int(num_o)
    h5file = './desc.h5'
    #print(sorted(glob.glob(os.path.join(faces_folder_path, "*.jpg")),key=os.path.getmtime))
    # all file names, sorted by modification time
    for f in sorted(glob.glob(os.path.join(faces_folder_path, "*.jpg")),
                    key=os.path.getmtime):
        base = os.path.basename(f)
        # print(base)
        # collect each image file's person name into the candidate list
        candidate.append(os.path.splitext(base)[0])
        # print(candidate)
        path.append(f)
    #print(path)

    # if the new file count differs from the old one
    if num_o != NUM_OF_FILES:
        # write the new count to the txt file
        num = open('numOfFiles.txt', 'w')
        num.write(str(NUM_OF_FILES))
        num.close()

        # append the new file names (new count: NUM_OF_FILES, original: num_o)
        c = open("cand.txt", "a", encoding="utf-8")
        for i in range(num_o - 1, NUM_OF_FILES - 1, 1):  # the -1 offsets account for the extra file on macOS
            c.write('\n')
            c.write(candidate[i])

        c.close()

        # add a one-shot entry for each new face
        # read each image and its file name (the person's name) from the folder, storing every image's 128-D feature vector in the descriptors list
        h5f = h5py.File(h5file, 'a')
        for i in range(num_o - 1, NUM_OF_FILES - 1, 1):
            img = io.imread(path[i])
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            # 1. face detection
            face_rects = detector(img, 0)
            # print(face_rects)
            for index, face in enumerate(face_rects):
                # 2. face alignment
                faceAligned = fa.align(img, gray, face)
                # 3. detect again on the aligned face (trims the excess left over after alignment)
                face_rects2 = detector(faceAligned, 1)
                for index2, face2 in enumerate(face_rects2):
                    ax1 = face2.left()
                    ay1 = face2.top()
                    ax2 = face2.right()
                    ay2 = face2.bottom()
                    # 4. detect the 68 landmarks
                    shape = predictor(faceAligned, face2)
                    # 5. compute the descriptor, a 128-D feature vector
                    face_descriptor = facerec.compute_face_descriptor(
                        faceAligned, shape)
                    # convert to numpy array format
                    v = np.array(face_descriptor)
                    descriptors.append(v)
                    #print(face_rects)
            # the original version is in face_allIn_final0.py
            h5f.create_dataset('new{}'.format(i), data=v)
        h5f.close()
        # append the new descriptors to the original txt file
        np.savetxt('desc_txt.txt', descriptors)

    # for checking
    # read the file back
    kk = h5py.File(h5file, 'r')
    #print(type(h5f))
    #print(h5f)
    # slicing an h5py dataset yields a numpy array
    #print(list(kk.keys()))
    #print(h5f['new8'][:])
    # the line below should run inside the loop; here it sits outside

    #for i in range(0, NUM_OF_FILES-1, 1):
    #print(kk['new{}'.format(i)][:])

    # loop over frames from the camera and display them
    while cap.isOpened():
        k = open("cand.txt", encoding="utf-8")
        cand = k.readlines()
        x1 = 0
        x2 = 0
        y1 = 0
        y2 = 0
        # grab a frame from the webcam
        ret, frame = cap.read()
        # shrink the image
        frame = imutils.resize(frame, width=800)  # display frame size
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # 1. face detection
        face_rects = detector(frame, 1)
        # iterate over every detection (all face coordinates)
        for index, rect in enumerate(face_rects):
            x1 = rect.left()
            y1 = rect.top()
            x2 = rect.right()
            y2 = rect.bottom()
            # mark the detected face with a box
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4,
                          cv2.LINE_AA)
            # 2. face alignment
            faceAligned = fa.align(frame, gray, rect)
            # 3. detect again on the aligned face (trims the excess left over after alignment)
            face_rects2 = detector(faceAligned, 1)
            # iterate over every detection (all face coordinates)
            for index2, rect2 in enumerate(face_rects2):
                ax1 = rect2.left()
                ay1 = rect2.top()
                ax2 = rect2.right()
                ay2 = rect2.bottom()
                # 4. detect the 68 landmarks
                shape = predictor(faceAligned, rect2)
                # 5. compute the descriptor, a 128-D feature vector
                face_descriptor = facerec.compute_face_descriptor(
                    faceAligned, shape)
                # convert to numpy array format
                d_test = np.array(face_descriptor)
                # compute the Euclidean distance to each face in the library (5 photos give 5 distances)
                # open the h5 file
                h5f = h5py.File(h5file, 'r')
                # reset the list holding the face distances
                dist = []
                for i in range(0, NUM_OF_FILES - 1, 1):  # -1 is enough
                    # compute the distance
                    dist_ = np.linalg.norm(h5f['new{}'.format(i)][:] - d_test)
                    # append it to the list
                    dist.append(dist_)
                # identify the person
                if dist != []:
                    # zip the candidate names with their Euclidean distances into a dict
                    c_d = dict(zip(cand, dist))
                    # sort ascending by Euclidean distance, giving a list of ("name", distance) pairs
                    cd_sorted = sorted(c_d.items(), key=lambda d: d[1])
                    #print(cd_sorted)
                    # a smaller Euclidean distance (0-1) means more similar; 0.4 is the acceptance threshold here
                    if cd_sorted[0][1] < 0.4:
                        rec_name = cd_sorted[0][0]
                    else:
                        rec_name = "No Data"
                        Erro = Erro + 1
                        if Erro == 6:
                            WHO = "No Data"
                            cap.release()
                            cv2.destroyAllWindows()
                            return WHO

                # label the recognized person's name (supports Chinese)
                imgPil = Image.fromarray(frame)
                font = ImageFont.truetype("C:/Windows/Fonts/msjh.ttc", 20)
                draw = ImageDraw.Draw(imgPil)
                draw.fontmode = '1'  # disable anti-aliasing
                draw.text((x1, y1 - 20),
                          rec_name,
                          font=font,
                          fill=(255, 255, 255))
                frame = np.array(imgPil)
                for index, name in enumerate(cand):
                    if rec_name == name:
                        person[index] = person[index] + 1
                        # print(person)
                        if person[index] == 4:
                            WHO = cand[index]
                            # label the recognized person's name (supports Chinese)
                            if WHO is not None:
                                time.sleep(5)
                                cap.release()
                                cv2.destroyAllWindows()
                                print(WHO)
                                return WHO
                            break
                    elif sum(person) > 8:
                        WHO = "No Data"
                        cap.release()
                        cv2.destroyAllWindows()
                        return WHO

            # label the recognized name with cv2 (English only)
            # cv2.putText(frame, rec_name, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        # OpenCV uses BGR
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # show the result
        cv2.imshow("Face Detection", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
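A hedged sketch of how the HDF5 file read above might be produced: one 128-dimensional descriptor per enrolled photo, stored under the dataset names new0, new1, ... that test() expects. The file name rec.h5 and the folder layout are assumptions mirroring the snippet, not original code.

# Hedged enrollment sketch: build the descriptor database that test() reads.
import os

import cv2
import dlib
import h5py
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1(
    "dlib_face_recognition_resnet_model_v1.dat")

with h5py.File("rec.h5", "w") as h5f:  # "rec.h5" is an assumed name
    for i, fn in enumerate(sorted(os.listdir("./rec"))):
        img = cv2.imread(os.path.join("./rec", fn))
        if img is None:
            continue  # skip non-image files
        rects = detector(img, 1)
        if len(rects) == 0:
            continue  # skip photos where no face is found
        shape = predictor(img, rects[0])
        descriptor = facerec.compute_face_descriptor(img, shape)
        # one 128-D vector per photo, keyed the way test() expects
        h5f.create_dataset("new{}".format(i), data=np.array(descriptor))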
コード例 #19
0
def Euclied_dist(inp1, inp2):
    # Euclidean distance between two vectors,
    # equivalent to np.linalg.norm(inp1 - inp2)
    return np.sqrt(np.sum(np.square(inp1 - inp2)))


images = []
for i in os.listdir(base_dir):
    for j in os.listdir(os.path.join(base_dir, i)):
        img = cv2.imread(os.path.join(base_dir, i, j))
        #print(i,j)
        faces = fd(img, 1)
        f = faces[0]  # assumes every image contains at least one face
        #print(len(faces))
        x, y, w, h = f.left(), f.top(), (f.right() - f.left()), (f.bottom() -
                                                                 f.top())
        temp = fa.align(img, cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), f)
        temp = cv2.resize(cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY), (160, 160))
        # round-trip through a temporary JPEG so the single-channel
        # grayscale image is re-read as a 3-channel array
        cv2.imwrite(os.path.join(base_dir, r'temp.jpg'), temp)
        temp = cv2.imread(os.path.join(base_dir, r'temp.jpg'))
        os.remove(os.path.join(base_dir, r'temp.jpg'))
        images.append(temp)

images = Standardize(np.array(images))

np.save(os.path.join(r'D:\ProjectML\FR', r'FImages'), images)

images = np.load(os.path.join(r'D:\ProjectML\FR', r'FImages.npy'))

# Label encoding for all the faces, i.e., converting face names to numerical values
labels = []
for i in os.listdir(base_dir):
コード例 #20
0
ファイル: align_faces.py プロジェクト: bobsawey/kuu
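This snippet begins mid-script, so `ap` is undefined as shown. A plausible reconstruction of the omitted preamble, hedged as an assumption (the argument names mirror the usage below, not confirmed source):

# Hedged reconstruction of the truncated preamble
import argparse

import cv2
import dlib
import imutils
from imutils.face_utils import FaceAligner, rect_to_bb

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
                help="path to input image")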
args = vars(ap.parse_args())

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
fa = FaceAligner(predictor, desiredFaceWidth=256)

# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=800)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# show the original input image and detect faces in the grayscale
# image
cv2.imshow("Input", image)
rects = detector(gray, 2)

# loop over the face detections
for rect in rects:
    # extract the ROI of the *original* face, then align the face
    # using facial landmarks
    (x, y, w, h) = rect_to_bb(rect)
    faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)
    faceAligned = fa.align(image, gray, rect)

    # display the output images
    cv2.imshow("Original", faceOrig)
    cv2.imshow("Aligned", faceAligned)
    cv2.waitKey(0)
コード例 #21
0
def detect_faces(color_image_list, gray_image_list, dlib_models):
    """
    Detects faces using Dlib's CNN model.

    Args:
        color_image_list (list): list of images. Each item is a frame read by the cv2 package.
        gray_image_list (list): list of images in grayscale. This list should contain the same images
            as the color_image_list, but in grayscale. This list is used by the CNN model.
        dlib_models (dict): a dictionary containing dlib cnn, shape_predictor, and recognition models.

    Returns:
        face_images (np.array): an array of images of detected faces.
        n_faces_list (list): a list of ints showing the number of detected faces in each frame.
        flat_face_rects (list): a list of dlib.rectangle objects containing rectangle info of each detected face.
        face_descriptors (list): one face_descriptor per face. A face_descriptor is a 128-dim vector that describes the face.

    Example:
        Given the following inputs
        color_image_list = [Img1 (has three faces), Img2 (has two faces)]
        gray_image_list = [Img1_grayscale (has three faces), Img2_grayscale (has two faces)]
        This function produces the following
        face_images = np.array([face1_img1, face2_img1, face3_img1, face4_img2, face5_img2])
        n_faces_list = [3, 2]
        flat_face_rects = [r1, r2, r3, r4, r5]
        Note that the face images are cropped from the original input images. So, in our
        example, face1_img1 is smaller than Img1, and face2_img1 might have a completely
        different size, but again smaller than Img1.
    """

    mmod_rects = dlib_models['cnn'](gray_image_list, upsample_num_times=UPSAMPLE_COUNT)

    flat_face_rects = []
    flat_image_list_indices = []
    n_faces_list = []
    all_shapes_list = []
    # mmod_rects is a list of list of rectangles
    for i, image_detection_rects in enumerate(mmod_rects):
        rects = dlib.rectangles()

        # save rects into an array to use later
        rects.extend([d.rect for d in image_detection_rects])
        flat_face_rects.extend(rects)
        flat_image_list_indices.extend([i]*len(image_detection_rects))
        n_faces_list.append(len(image_detection_rects))

        # find shapes in the image -- this is used for face recognition
        faces = dlib.full_object_detections()
        for r in rects:
            shape = dlib_models['shape_predictor'](color_image_list[i], r)
            faces.append(shape)
        all_shapes_list.append(faces)

    # in the above example
    # flat_face_rects = [r1, r2, r3, r4, r5]
    # flat_image_list_indices = [0, 0, 0, 1, 1]
    # n_faces_list = [3, 2]
    # all_shapes_list = [dlib.full_object_detections, dlib.full_object_detections]

    # align detected rectangles to get faces for the next step
    fa = FaceAligner(dlib_models['shape_predictor'])
    face_images = []
    for i, rect in enumerate(flat_face_rects):
        image_index = flat_image_list_indices[i]
        aligned_image = fa.align(color_image_list[image_index], gray_image_list[image_index], rect)
        # note: imutils.resize scales by width and preserves aspect ratio,
        # so the height argument is ignored here
        aligned_image = imutils.resize(aligned_image, width=160, height=160)
        face_images.append(aligned_image)

    # in the above example
    # face_images = [img1, img2, img3, img4, img5]

    # face encodings
    face_descriptors = dlib_models['recognition_model'].compute_face_descriptor(color_image_list, all_shapes_list)
    # each face_descriptor is a 128-dim vector that describes the face.
    # if two face descriptor vectors have a Euclidean distance between them
    # less than 0.6, then they are from the same person
    # in the above example
    # face_descriptors = [[[0..127],[0..127],[0..127]],[[0..127],[0..127]]]

    return np.array(face_images), n_faces_list, flat_face_rects, face_descriptors
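A hedged usage sketch for detect_faces follows; the model file names, the test image name, and the UPSAMPLE_COUNT module constant are assumptions that mirror the names used above.

# Hedged usage sketch (model paths and image name are assumptions)
import cv2
import dlib

UPSAMPLE_COUNT = 1  # module-level constant referenced inside detect_faces

dlib_models = {
    'cnn': dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat"),
    'shape_predictor': dlib.shape_predictor(
        "shape_predictor_68_face_landmarks.dat"),
    'recognition_model': dlib.face_recognition_model_v1(
        "dlib_face_recognition_resnet_model_v1.dat"),
}

frame = cv2.imread("group_photo.jpg")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_images, n_faces_list, rects, descriptors = detect_faces(
    [frame], [gray], dlib_models)
print("found {} face(s)".format(n_faces_list[0]))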
コード例 #22
0
def mark_your_attendance(request):

    detector = dlib.get_frontal_face_detector()

    predictor = dlib.shape_predictor(
        'face_recognition_data/shape_predictor_68_face_landmarks.dat'
    )  # path to the shape predictor; TODO: change to a relative path later
    svc_save_path = "face_recognition_data/svc.sav"

    with open(svc_save_path, 'rb') as f:
        svc = pickle.load(f)
    fa = FaceAligner(predictor, desiredFaceWidth=96)
    encoder = LabelEncoder()
    encoder.classes_ = np.load('face_recognition_data/classes.npy')

    faces_encodings = np.zeros((1, 128))
    no_of_faces = len(svc.predict_proba(faces_encodings)[0])
    count = dict()
    present = dict()
    log_time = dict()
    start = dict()
    for i in range(no_of_faces):
        count[encoder.inverse_transform([i])[0]] = 0
        present[encoder.inverse_transform([i])[0]] = False

    vs = VideoStream(src=0).start()

    sampleNum = 0

    while True:

        frame = vs.read()

        frame = imutils.resize(frame, width=800)

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = detector(gray_frame, 0)

        for face in faces:
            print("INFO : inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)

            face_aligned = fa.align(frame, gray_frame, face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            (pred, prob) = predict(face_aligned, svc)

            if (pred != [-1]):

                person_name = encoder.inverse_transform(np.ravel([pred]))[0]
                pred = person_name
                if count[pred] == 0:
                    start[pred] = time.time()
                    count[pred] = count.get(pred, 0) + 1

                if count[pred] == 4 and (time.time() - start[pred]) > 1.2:
                    count[pred] = 0
                else:
                    #if count[pred] == 4 and (time.time()-start) <= 1.5:
                    present[pred] = True
                    log_time[pred] = datetime.datetime.now()
                    count[pred] = count.get(pred, 0) + 1
                    print(pred, present[pred], count[pred])
                cv2.putText(frame,
                            str(person_name) + str(prob), (x + 6, y + h - 6),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

            else:
                person_name = "unknown"
                cv2.putText(frame, str(person_name), (x + 6, y + h - 6),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

            #cv2.putText()
            # a brief pause before the next detection loop iteration
            #cv2.waitKey(50)

        # show the frame in a window
        cv2.imshow("Mark Attendance - In - Press q to exit", frame)
        # imshow needs a periodic waitKey call to refresh the window;
        # wait 50 ms per frame and exit the loop on `q`
        key = cv2.waitKey(50) & 0xFF
        if key == ord("q"):
            break

    # stop the video stream
    vs.stop()

    # destroying all the windows
    cv2.destroyAllWindows()
    update_attendance_in_db_in(present)

    # return redirect('recognition/admin-dashboard')
    return render(request, 'recognition/admin_dashboard.html')
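The view above relies on a predict() helper that is not included in the snippet. A minimal sketch of what it might look like, assuming the face encoding comes from the face_recognition package and that a low class probability maps to the unknown label [-1]; the 0.7 threshold is likewise an assumption:

# Hedged sketch of the undefined predict() helper
import numpy as np
import face_recognition

def predict(face_aligned, svc, threshold=0.7):  # threshold is assumed
    # encode the aligned face; face_recognition expects an RGB image
    encodings = face_recognition.face_encodings(face_aligned)
    if len(encodings) == 0:
        return ([-1], [0])
    probs = svc.predict_proba([encodings[0]])[0]
    best = int(np.argmax(probs))
    if probs[best] <= threshold:
        return ([-1], [probs[best]])
    return ([best], [probs[best]])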
            cv2.imshow("Greška", image)
            cv2.waitKey(30)
            #čekanje da korisnik primjeti sliku
            time.sleep(1.2)
            cv2.destroyAllWindows(
            )  # uništavanje prozora slike i nastavak rada petlje
            continue
        # if the image has exactly one face, show it
        cv2.imshow("Orginal slika", image)

        for rect in rects:
            # convert the face rectangle to width/height plus the top-left x, y point
            (x, y, w, h) = rect_to_bb(rect)
            # shrink the detected face to 256 pixels wide
            faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)
            faceAligned = fa.align(image, gray, rect)  # align (rotate) the face

            LokacijaSlike = str(
                pathlib.Path(__file__).parent.absolute().joinpath("podaci").
                joinpath("korisnici").joinpath(folder).joinpath(slike))

            height, width, channels = faceAligned.shape
            CentarSlike = (w / 2, h / 2)
            # flip the face 180 degrees
            Rotirana = imutils.rotate(faceAligned, 180)
            cv2.imwrite(LokacijaSlike, Rotirana)
            cv2.imshow("Lice Neispravljeno", faceOrig)
            cv2.imshow("Lice Ispravljeno", Rotirana)
            cv2.waitKey(30)

            time.sleep(1.2)
コード例 #24
0
# detect faces in the grayscale image
cv2.imshow("Input", image)
rects = detector(gray, 1)

# loop over the faces that are detected
for (i, rect) in enumerate(rects):
    # detect the facial landmarks, then convert the (x, y)-coordinates
    # into a NumPy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    # convert dlib's rectangle to an OpenCV bounding box
    # [i.e., (x, y, w, h)] and draw it
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)
    # `face` is presumably the FaceAligner instance from the truncated preamble
    faceAligned = face.align(image, gray, rect)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    import uuid
    f = str(uuid.uuid4())
    cv2.imwrite("foo/" + f + ".png", faceAligned)

    # shows the face number
    cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # loop over the (x, y)-coordinates of the facial landmarks and draw them on the image
    for (x, y) in shape:
        cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    cv2.imshow("Original", faceOrig)
コード例 #25
0
                            # Draw a box around the face
                            cv2.rectangle(frame, (left, top), (right, bottom),
                                          (0, 0, 255), 2)

                            # Draw a label with a name below the face
                            cv2.rectangle(frame, (left, bottom - 35),
                                          (right, bottom), (0, 0, 255),
                                          cv2.FILLED)
                            font = cv2.FONT_HERSHEY_DUPLEX
                            cv2.putText(frame, name, (left + 6, bottom - 6),
                                        font, 1.0, (255, 255, 255), 1)

                            # Estimate gender and age
                            # Convert to rect
                            face_detected = _css_to_rect(
                                (top, right, bottom, left))
                            print(face_detected)
                            faces[0, :, :, :] = fa.align(
                                rgb_small_frame, frame_gray, face_detected)
                            age_predict, gender_predict = sess.run(
                                [age, gender],
                                feed_dict={
                                    images_pl: faces,
                                    train_mode: False
                                })

                            label = "{}, {}".format(
                                int(age_predict),
                                "F" if gender_predict == 0 else "M")
                            cv2.putText(frame, label, (left + 6, top - 6),
                                        font, 1.0, (255, 255, 255), 1)

                            c.execute(
                                "insert into people_inroom values (%s, %s, %s)",