Code Example #1
File: main.py  Project: gnoya/face_recognition
from face_detection import FaceDetection
from face_recognition import FaceRecognition
from utils import read_image

if __name__ == "__main__":
    # Init the Face Detection and Face Recognition classes
    detection = FaceDetection()
    recognition = FaceRecognition()

    # Read the image
    image = read_image('./yolov3/data/samples/person.jpg')

    # Detect a face in the image (if many, returns the biggest one; if none, returns None)
    bounding_box = detection.detect(image)

    # bounding_box is a dictionary with parameters: x1, y1, x2, y2, width, height
    print(bounding_box)

    if bounding_box is not None:
        # Plot the bounding box on the image
        detection.plot(image, bounding_box)

        # Extract the face from the image
        face = recognition.extract(image, bounding_box)

        # Check if the face is from an employee, return True or False
        is_employee = recognition.recognize(face)

        if is_employee:
            print('Opening Door')
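
The comments above spell out the keys of the bounding_box dictionary. As an illustrative sketch only (an assumption about what a crop step like recognition.extract might do, not code from gnoya/face_recognition), those keys map directly onto NumPy slicing:

import numpy as np

def crop_face(image: np.ndarray, bounding_box: dict) -> np.ndarray:
    # Crop the face region from an (H, W, C) image array using the
    # x1, y1, x2, y2 keys documented above. Hypothetical helper.
    return image[bounding_box['y1']:bounding_box['y2'],
                 bounding_box['x1']:bounding_box['x2']]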
Code Example #2
def main():
    fd = FaceDetection()
    fd.detect(MediaType.CAMERA)
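
MediaType is not defined in this snippet; it presumably selects the input source passed to detect. A minimal sketch of what such an enum could look like (purely an assumption, not the project's actual definition):

from enum import Enum, auto

class MediaType(Enum):
    CAMERA = auto()  # live webcam stream
    IMAGE = auto()   # single image file
    VIDEO = auto()   # video file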
Code Example #3
import pickle

import cv2

# cap (camera wrapper), detect (face detector), and facerec (recognizer)
# are initialized earlier in the original file.

# Load the dataset to obtain the label data
with open(dir + pickle_file, mode='rb') as f:
    image_data = pickle.load(f)  # unused
    label_data = pickle.load(f)  # unused
    index_to_label = pickle.load(f)  # dict mapping label index (int) to label name (str)

print('camera opened')
cv2.namedWindow("faces", cv2.WINDOW_AUTOSIZE)  # window for the face images
cv2.namedWindow("camera", cv2.WINDOW_NORMAL)  # window for the camera image
key = 0

# main loop
while key != ord("q"):  # press 'q' to quit
    key = cv2.waitKey(50)  # wait up to 50 ms for a key press
    img = cap.ReadCaputure()  # grab a frame from the camera
    draw_img = img.copy()  # copy of the frame for drawing
    p_faces = detect.detect(draw_img)  # get the face coordinates
    detect.draw_rec(draw_img, p_faces)  # draw red rectangles around the faces
    cv2.imshow("camera", draw_img)  # show the frame
    if len(p_faces) != 0:
        catface, images = detect.face_images(img, p_faces)  # extract the face images
        cv2.imshow("faces", catface)
        if key == ord("n"):  # press 'n' to run inference
            for image in images:
                gray_img = cv2.cvtColor(image,
                                        cv2.COLOR_BGR2GRAY)  # convert to grayscale
                predict = facerec.predict(gray_img)  # run inference
                predict_label = index_to_label[predict[0]]  # map index to label string
                print("predict: " + predict_label)
Code Example #4
import cv2
import numpy as np

# HeadPose, ie (inference engine core), facedetection, CAM_ADDR, and the
# model paths are defined earlier in the original file.

# Create HeadPose model
headpose = HeadPose(ie, HEADPOSE_XML_PATH, HEADPOSE_BIN_PATH)

# init video from rtsp link
video = cv2.VideoCapture(CAM_ADDR)

# init video from built-in camera
# video = cv2.VideoCapture(0)

while video.isOpened():
    ret, frame = video.read()
    if not ret:  # stop when no frame could be read
        break
    show_frame = frame.copy()
    h, w, c = frame.shape

    # get the faces in the frame
    outputs = facedetection.detect(frame)

    if len(outputs) != 0:
        outputs = np.array(outputs)
        for output in outputs:
            try:
                # get face location (normalized coords scaled to pixels)
                x_min, y_min, x_max, y_max = (output *
                                              [w, h, w, h]).astype(int)

                # crop face
                img_cropped = frame[y_min:y_max, x_min:x_max]

                # get face tilt (yaw angle from the head-pose model)
                yaw = headpose.detect(img_cropped)['angle_y_fc'][0][0]
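
Multiplying each detector output by [w, h, w, h] implies the detector returns normalized (x_min, y_min, x_max, y_max) coordinates in [0, 1]. A hedged helper (an assumption, not part of the original project) that performs the same conversion and also clamps the box to the frame, so the subsequent crop cannot index outside the image:

import numpy as np

def to_pixel_box(output: np.ndarray, w: int, h: int):
    # Scale a normalized (x_min, y_min, x_max, y_max) box to pixel
    # coordinates and clamp it to the frame bounds. Hypothetical helper.
    x_min, y_min, x_max, y_max = (output * [w, h, w, h]).astype(int)
    x_min, x_max = max(0, x_min), min(w, x_max)
    y_min, y_max = max(0, y_min), min(h, y_max)
    return x_min, y_min, x_max, y_max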