Example #1
import time

import cv2
import numpy as np
import tensorflow as tf

from src.utils import (load_tf_ssd_detection_graph,
                       run_inference_for_single_image_through_ssd,
                       post_process_ssd_predictions, load_tf_facenet_graph,
                       crop_ssd_prediction, prewhiten, get_face_embeddings,
                       print_recognition_output, draw_detection_box)

import configparser
config = configparser.ConfigParser()
config.read('config.ini')

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_PERSON_DETECTION = config.get("DEFAULT","PATH_TO_PERSON_DETECTION")
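
These examples assume a config.ini that provides the keys read here and further below. A minimal sketch of generating such a file; every value is a placeholder, not the project's real paths or settings:

import configparser

# Hypothetical sample config: key names come from the examples, values are placeholders.
sample = configparser.ConfigParser()
sample["DEFAULT"] = {
    "PATH_TO_PERSON_DETECTION": "./model/ssd_models/person_detection.pb",
    "PATH_TO_FACE_DETECTION": "./model/ssd_models/face_detection.pb",
    "PATH_TO_FACENET_MODEL": "./model/facenet/facenet.pb",
    "PATH_TO_SVM_EMBEDDINGS_CLASSIFIER": "./model/classifiers/svm_classifier.pkl",
    "PATH_TO_FINAL_DETECTION_DIRECTORY": "./output/",
    "CROP_SSD_PERCENTAGE": "0.3",
    "IMAGE_SIZE": "160",
    "FACENET_PREDICTION_BATCH_SIZE": "32",
}

# Written to a separate file so an existing config.ini is not overwritten.
with open("config.sample.ini", "w") as config_file:
    sample.write(config_file)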

if __name__ == "__main__":

    with tf.Graph().as_default():

        ### Creating and Loading the Single Shot Detector ###
        image_tensor, tensor_dict = load_tf_ssd_detection_graph(PATH_TO_PERSON_DETECTION, input_graph=None)

        sess = tf.Session()
        with sess.as_default():
            cap = cv2.VideoCapture(0)

            if not cap.isOpened():
                print("Error opening video stream or file")

            while cap.isOpened():
                ret, image = cap.read()
                if not ret:
                    break
                image = image[..., ::-1]  # BGR (OpenCV) -> RGB, as the SSD expects
                image_display = image.copy()

                initial_inference_start_time = time.time()

                # Both the SSD and Facenet expect np.uint8, RGB images as input.
                image_np = image.astype(np.uint8)
                # The two helper calls below use assumed signatures; only the
                # function names come from the src.utils import above.
                output_dict = run_inference_for_single_image_through_ssd(
                    sess, image_tensor, tensor_dict, image_np)
                detections = post_process_ssd_predictions(output_dict)
                for cur_det in detections:  # each row: [ymin, xmin, ymax, xmax, score]
                    boxes = cur_det[:4]
                    ymin = boxes[0]
                    xmin = boxes[1]
                    ymax = boxes[2]
                    xmax = boxes[3]
                    conf_score = cur_det[4]
                    cv2.rectangle(
                        image_np, (int(xmin), int(ymin)),
                        (int(xmax), int(ymax)), (255, 0, 0), 3
                    )  # image_np is still RGB here, so the first tuple element is red

                image_np_bgr = image_np[..., ::-1]  # RGB -> BGR for cv2.imshow
                cv2.imshow('face-detection-ssd', image_np_bgr)
                print("Press Escape to quit")
                if cv2.waitKey(1) == 27:
                    break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    PATH_TO_FROZEN_GRAPH = './model/ssd_models/ssd_mobilenet_v1_focal_loss_face_mark_2.pb'
    PATH_TO_IMAGES_DIR = './images/'

    main_graph = tf.Graph()
    image_tensor, tensor_dict = load_tf_ssd_detection_graph(
        PATH_TO_FROZEN_GRAPH, input_graph=main_graph)
    run_tf_object_detection_images(main_graph,
                                   image_tensor,
                                   tensor_dict,
                                   path_to_images_dir=PATH_TO_IMAGES_DIR)
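
The helpers used above presumably wrap standard TensorFlow Object Detection API calls: import a frozen graph, feed the uint8 RGB frame into 'image_tensor:0', and fetch the detection tensors. A minimal self-contained sketch of that flow; the tensor names follow the TF OD API convention, while the helpers in src.utils may differ in their exact signatures:

import numpy as np
import tensorflow as tf  # TF 1.x style, matching the sessions in these examples


def load_frozen_graph(pb_path):
    """Import a frozen .pb file into its own tf.Graph."""
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    return graph


def detect_objects(graph, sess, image_rgb_uint8, score_threshold=0.5):
    """Run one frame through a TF OD API SSD and return pixel-space boxes."""
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_t = graph.get_tensor_by_name('detection_boxes:0')
    scores_t = graph.get_tensor_by_name('detection_scores:0')

    boxes, scores = sess.run(
        [boxes_t, scores_t],
        feed_dict={image_tensor: np.expand_dims(image_rgb_uint8, axis=0)})

    # Boxes come back normalized as [ymin, xmin, ymax, xmax]; scale to pixels.
    h, w = image_rgb_uint8.shape[:2]
    detections = []
    for box, score in zip(boxes[0], scores[0]):
        if score < score_threshold:
            continue
        ymin, xmin, ymax, xmax = box
        detections.append([ymin * h, xmin * w, ymax * h, xmax * w, float(score)])
    return detections

The normalized [ymin, xmin, ymax, xmax] box order is also why the drawing loops above read boxes[0] as ymin and boxes[1] as xmin.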
Example #3
SOURCE_IM_PATH_ARRAY = [
    # paths of the images to run the recognition pipeline on
]
FINAL_DETECTION_PATH = config.get("DEFAULT",
                                  "PATH_TO_FINAL_DETECTION_DIRECTORY")
FACENET_MODEL_PATH = config.get("DEFAULT", "PATH_TO_FACENET_MODEL")
CLASSIFIER_PATH = config.get("DEFAULT", "PATH_TO_SVM_EMBEDDINGS_CLASSIFIER")

CROP_SSD_PERCENTAGE = float(config.get("DEFAULT", "CROP_SSD_PERCENTAGE"))
IMAGE_SIZE = int(config.get("DEFAULT", "IMAGE_SIZE"))
FACENET_PREDICTION_BATCH_SIZE = int(
    config.get("DEFAULT", "FACENET_PREDICTION_BATCH_SIZE"))

if __name__ == "__main__":

    with tf.Graph().as_default():

        image_tensor, tensor_dict = load_tf_ssd_detection_graph(
            PATH_TO_CKPT, input_graph=None)
        sess = tf.Session()
        with sess.as_default():

            ### Creating and Loading MTCNN ###
            pnet, rnet, onet = create_mtcnn(sess, None)

            ### Creating and Loading the Facenet Graph ###
            images_placeholder, embeddings, phase_train_placeholder = load_tf_facenet_graph(
                FACENET_MODEL_PATH)

            for image_id, SOURCE_IM_PATH in enumerate(SOURCE_IM_PATH_ARRAY):
                initial_inference_start_time = time.time()

                image = cv2.imread(SOURCE_IM_PATH)
                image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.uint8)
                # Assumed helper signatures, mirroring the webcam example above.
                output_dict = run_inference_for_single_image_through_ssd(
                    sess, image_tensor, tensor_dict, image_np)
                detections = post_process_ssd_predictions(output_dict)
                for cur_det in detections:  # each row: [ymin, xmin, ymax, xmax, score]
                    boxes = cur_det[:4]
                    ymin = boxes[0]
                    xmin = boxes[1]
                    ymax = boxes[2]
                    xmax = boxes[3]
                    conf_score = cur_det[4]
                    cv2.rectangle(
                        image_np, (int(xmin), int(ymin)),
                        (int(xmax), int(ymax)), (255, 0, 0), 3
                    )  # image_np is still RGB here, so the first tuple element is red

                image_np_bgr = image_np[..., ::-1]
                cv2.imshow('face-detection-ssd', image_np_bgr)
                if cv2.waitKey(1) == 27:
                    break

    cv2.destroyAllWindows()
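
The Facenet graph loaded above is driven through images_placeholder, embeddings and phase_train_placeholder. Face crops are normally prewhitened (per-image mean/std normalization, as in the reference Facenet implementation) and pushed through in batches with phase_train set to False; get_face_embeddings in src.utils presumably wraps a loop of this shape. A minimal sketch, assuming the crops are already resized RGB arrays of the same size:

import numpy as np


def prewhiten_face(face):
    # Per-image normalization as used by the reference Facenet code.
    mean = np.mean(face)
    std = np.std(face)
    std_adj = np.maximum(std, 1.0 / np.sqrt(face.size))
    return (face - mean) / std_adj


def embed_faces(sess, images_placeholder, phase_train_placeholder, embeddings,
                face_crops, batch_size=32):
    """Return one Facenet embedding per face crop."""
    batch = np.stack([prewhiten_face(f.astype(np.float32)) for f in face_crops])
    results = []
    for start in range(0, len(batch), batch_size):
        feed_dict = {images_placeholder: batch[start:start + batch_size],
                     phase_train_placeholder: False}
        results.append(sess.run(embeddings, feed_dict=feed_dict))
    return np.concatenate(results, axis=0)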


if __name__ == "__main__":
    PATH_TO_FACE_DETECTION = config.get("DEFAULT", "PATH_TO_FACE_DETECTION")
    # PATH_TO_VIDEO = '/Users/petertanugraha/Projects/tf-pose-estimation/test_video/uoft_lecture_trimmed.mp4'

    main_graph = tf.Graph()
    image_tensor, tensor_dict = load_tf_ssd_detection_graph(
        PATH_TO_FACE_DETECTION, input_graph=main_graph)
    run_tf_object_detection_video(main_graph,
                                  image_tensor,
                                  tensor_dict,
                                  path_to_video=None)
            color_pallete = (int(color_pallete[0]), int(color_pallete[1]),
                             int(color_pallete[2]))
        else:
            color_pallete = (0, 0, 255)
        self.color_pallete = color_pallete


if __name__ == "__main__":

    # Global list holding every human identified over the lifetime of the current recording session
    Identified_Human_List = []
    with tf.Graph().as_default():

        ### Loading Face Detector ###
        Face_Detection_Graph = tf.Graph()
        image_tensor, tensor_dict = load_tf_ssd_detection_graph(
            PATH_TO_FACE_DETECTION, input_graph=Face_Detection_Graph)
        face_detection_sess = tf.Session(graph=Face_Detection_Graph)

        ### Loading Person Detector ###
        person_image_tensor, person_tensor_dict = load_tf_ssd_detection_graph(
            PATH_TO_PERSON_DETECTION, input_graph=None)
        main_sess = tf.Session()

        ### Loading the SVM Classifier for Face ID classification ###
        with open(CLASSIFIER_PATH_SVM, 'rb') as infile:
            (model, class_names) = pickle.load(infile)

        ### Loading the KNN Classifier for Face Recognition ###
        with open(CLASSIFIER_PATH_KNN, 'rb') as infile:
            knn_model = pickle.load(infile)
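
Once embeddings are available, the pickled SVM (model with class_names) and the KNN model map them to identities. With scikit-learn classifiers this typically looks like the sketch below; the SVM must have been trained with probability=True for predict_proba to exist, and the distance threshold is an illustrative value only:

import numpy as np


def classify_embeddings(emb_array, svm_model, class_names, knn_model,
                        unknown_distance_threshold=0.8):
    """Assign a name and confidence to each embedding; embeddings far from any
    training sample (by nearest-neighbour distance) are labelled Unknown."""
    # SVM: per-class probabilities, then the best class and its confidence.
    probabilities = svm_model.predict_proba(emb_array)
    best_class_indices = np.argmax(probabilities, axis=1)
    best_class_probabilities = probabilities[
        np.arange(len(best_class_indices)), best_class_indices]

    # KNN: distance to the nearest training embedding, used to reject strangers.
    distances, _ = knn_model.kneighbors(emb_array, n_neighbors=1)

    results = []
    for i, class_index in enumerate(best_class_indices):
        name = class_names[class_index]
        if distances[i][0] > unknown_distance_threshold:
            name = "Unknown"
        results.append((name, float(best_class_probabilities[i])))
    return results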