Example #1
def get_face_softmax_regressor_classifier_model_and_labels(
) -> (dict, keras.models.Model):
    """This function returns a dictionary which represents output results
     and our softmax regressor which classifies face images."""

    labels_file_path = utils.get_or_download(
        'persons.json', 'https://drive.google.com/uc?id=1EDegJgHR76PCEyEFMn'
        '-gbYJrO0v6Od6A')
    classifier_file_path = utils.get_or_download(
        'face_classifier_model.h5', 'https://drive.google.com/uc?id=1PGO666v'
        '-Yk3H46ihf57ie_dObLiTdyHS')

    with open(labels_file_path) as fp:
        person_labels = json.load(fp)

    classifier_model = tf.keras.models.load_model(classifier_file_path)

    return person_labels, classifier_model
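A minimal usage sketch for this pair follows. It assumes the classifier consumes the 2622-dimensional VGG-Face embeddings produced in Example #4 and that persons.json maps class indices (as strings) to names; both are assumptions, since this snippet only downloads those files.

import numpy as np

person_labels, classifier = get_face_softmax_regressor_classifier_model_and_labels()

# Placeholder for a real embedding from Example #4's extractor (assumed 2622-d).
embedding = np.random.rand(1, 2622)
probabilities = classifier.predict(embedding)[0]
print(person_labels[str(int(np.argmax(probabilities)))])  # JSON object keys are strings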
Example #2
def get_emotion_model() -> (keras.models.Model, list):
    """This function returns facial expression prediction model with it's pre-trained weights
    loaded and emotion labels which used to make sesse of the predictions."""

    model_str_file_path = utils.get_or_download(
        "facial_expression_model_structure.json", "https://drive.google.com"
        "/uc?id="
        "1GzVBzxSuYfChqZP4efs6ZUl4"
        "9Sn5G6Ws")
    model_weights_file_path = utils.get_or_download(
        "facial_expression_model_weights.h5", "https://drive.google.com"
        "/uc?id=1o5wjB5G1pfyY7ppPb"
        "TIRIHMWRCoNUwkA")

    with open(model_str_file_path, "r") as fp:
        model = model_from_json(fp.read())
    model.load_weights(model_weights_file_path)

    labels = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise',
              'neutral')

    return model, labels
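Example #6 below shows how this model is fed: a 48x48 grayscale crop scaled to [0, 1]. The same preprocessing in isolation, as a sketch (face.jpg is a hypothetical test image):

import cv2
import keras
import numpy as np

model, labels = get_emotion_model()

face = cv2.imread("face.jpg")                    # hypothetical test image
face = cv2.resize(face, (48, 48))                # model expects 48x48 input
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)    # ...in grayscale
pixels = keras.preprocessing.image.img_to_array(face)
pixels = np.expand_dims(pixels, axis=0)          # add a batch dimension
pixels /= 255                                    # scale to [0, 1]

print(labels[np.argmax(model.predict(pixels)[0])])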
Example #3
def get_gender_model() -> keras.models.Model:
    """This function returns gender prediction model with it's pre-trained weights loaded."""
    model = get_vgg_face_model()

    base_model_output = Convolution2D(2, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    gender_model = Model(inputs=model.input, outputs=base_model_output)

    file_path = utils.get_or_download(
        "gender_model_weights.h5", "https://drive.google.com/uc?id="
        "1FDdW_U45pG2upMO3mKxsxQhb4YNIwEg2")

    gender_model.load_weights(file_path)

    return gender_model
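Usage mirrors the other VGG-Face heads in this file: a 224x224 face crop scaled to [0, 1]. Example #6 treats output index 0 as female; a sketch (face.jpg is a hypothetical test image):

import cv2
import keras
import numpy as np

gender_model = get_gender_model()

face = cv2.resize(cv2.imread("face.jpg"), (224, 224))  # hypothetical test image
pixels = keras.preprocessing.image.img_to_array(face)
pixels = np.expand_dims(pixels, axis=0)
pixels /= 255

gender_distribution = gender_model.predict(pixels)[0]
print("F" if np.argmax(gender_distribution) == 0 else "M")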
Example #4
def get_vgg_face_model_embedding_extractor() -> keras.models.Model:
    """This function removes last two layers of vgg model and returns it. The resulting method
    is used to extract facial embeddings from a given 224x224 image. """
    # Define VGG_FACE_MODEL architecture
    model = get_vgg_face_model()

    file_path = utils.get_or_download(
        'vgg_face_weights.h5', 'https://drive.google.com/uc?id='
        '1Hut0a6bf3OpL_2kbgyqnYiu0nr_OXYTz')

    # Load VGG Face model weights
    model.load_weights(file_path)

    # Remove the last softmax layer, keeping the model up to the last flatten
    # layer, which outputs 2622 units
    vgg_face_extractor = Model(inputs=model.layers[0].input,
                               outputs=model.layers[-2].output)

    return vgg_face_extractor
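A minimal extraction sketch, assuming the same 224x224, [0, 1]-scaled input that the other models in this file use (face.jpg is a hypothetical test image):

import cv2
import keras
import numpy as np

extractor = get_vgg_face_model_embedding_extractor()

face = cv2.resize(cv2.imread("face.jpg"), (224, 224))  # hypothetical test image
pixels = keras.preprocessing.image.img_to_array(face)
pixels = np.expand_dims(pixels, axis=0)
pixels /= 255

embedding = extractor.predict(pixels)[0]
print(embedding.shape)  # (2622,), per the comment above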
Example #5
def get_age_model() -> keras.models.Model:
    """This function returns age prediction model with it's pre-trained weights loaded."""

    model = get_vgg_face_model()

    base_model_output = Convolution2D(101, (1, 1), name='predictions')(
        model.layers[-4].output)
    base_model_output = Flatten()(base_model_output)
    base_model_output = Activation('softmax')(base_model_output)

    age_model = Model(inputs=model.input, outputs=base_model_output)

    file_path = utils.get_or_download(
        "age_model_weights.h5", "https://drive.google.com/uc?id="
        "1JRZTjxQbR07dNWm9_XhULMWJ-uGeZbZ8")

    age_model.load_weights(file_path)

    return age_model
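The model outputs a probability distribution over 101 age bins; Example #6 turns it into an apparent age by taking the expected value, weighting each bin's probability by the age its index represents. The same computation in isolation (face.jpg is a hypothetical test image):

import cv2
import keras
import numpy as np

age_model = get_age_model()

face = cv2.resize(cv2.imread("face.jpg"), (224, 224))  # hypothetical test image
pixels = keras.preprocessing.image.img_to_array(face)
pixels = np.expand_dims(pixels, axis=0)
pixels /= 255

age_dists = age_model.predict(pixels)           # shape (1, 101)
apparent_age = int(np.floor(np.sum(age_dists * np.arange(101), axis=1))[0])
print(apparent_age)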
Example #6
    def analyze_video(self, progress_callback):
        """This function gets frames from a given video, crops faces and makes predictions out of them. After making
            predictions, it saves the frames to the hard drive with the help of "savePicture()" function and and shows
            them to the user with prediction labels. Cropped faces will be saved into the ./saved_images folder."""

        self.startButton.setDisabled(True)
        self.print_line(
            "Making preparations, downloading missing model files, please wait...\n\n"
        )
        loading_image = cv2.imread("loading.png")
        self.show_image(loading_image)

        v_pafy = pafy.new(self.video_url.text())
        play = v_pafy.getbest(preftype="mp4")
        cap = cv2.VideoCapture(play.url)

        # here we load our models to make out predictions
        age_model = models.get_age_model()
        gender_model = models.get_gender_model()
        emotion_model, emotion_labels = models.get_emotion_model()
        face_cascade = cv2.CascadeClassifier(
            utils.get_or_download(
                'haarcascade_frontalface_default.xml', 'https://drive.google'
                '.com/uc?id=1vuWt_x_3'
                'QQaMs8nxklmMf-8OtHMB'
                'OM5V'))

        # The age model has 101 outputs; each probability is weighted by the
        # age its index represents, and the sum gives the apparent age.
        age_output_indexes = np.arange(0, 101)

        utils.delete_contents_of_folder(saved_images_path)

        frame = 0
        frame_width = 720
        self.print_line("Started real-time video analyzer...")
        while not self.windowClosed:
            frame += 1
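            # Discard the next 40 frames so only every ~41st frame is analyzed.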
            for i in range(40):
                cap.read()
            ret, image = cap.read()

            if not ret:
                break

            image = imutils.resize(image, frame_width)

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            if len(faces) > 0:
                print("Found {} faces".format(str(len(faces))))
                for (x, y, w, h) in faces:
                    if w > frame_width / 10:
                        # The age/gender data set has a 40% margin around each
                        # face, so expand the detected box before cropping.
                        margin = 30
                        margin_x = int((w * margin) / 100)
                        margin_y = int((h * margin) / 100)

                        detected_10margin_face = image[int(y):int(y + h),
                                                       int(x):int(x + w)]

                        try:
                            detected_40margin_face = \
                                image[int(y - margin_y):int(y + h + margin_y),
                                      int(x - margin_x):int(x + w + margin_x)]

                            if detected_40margin_face.size == 0:
                                raise ValueError("expanded face box is empty")
                        except ValueError:
                            # Fall back to the unexpanded crop when the margin
                            # pushes the box outside the frame.
                            detected_40margin_face = detected_10margin_face

                        try:
                            detected_40margin_face = cv2.resize(
                                detected_40margin_face, (224, 224))

                            detected_gray_face = cv2.resize(
                                detected_10margin_face, (48, 48))
                            detected_gray_face = cv2.cvtColor(
                                detected_gray_face, cv2.COLOR_BGR2GRAY)

                            img_pixels = keras.preprocessing.image.img_to_array(
                                detected_40margin_face)
                            img_pixels = np.expand_dims(img_pixels, axis=0)
                            img_pixels /= 255

                            # Predict age and gender
                            age_dists = age_model.predict(img_pixels)
                            apparent_age = str(
                                int(
                                    np.floor(
                                        np.sum(age_dists * age_output_indexes,
                                               axis=1))[0]))

                            gender_distribution = gender_model.predict(
                                img_pixels)[0]
                            gender_index = np.argmax(gender_distribution)

                            detected_gray_face = keras.preprocessing.image.img_to_array(
                                detected_gray_face)
                            detected_gray_face = np.expand_dims(
                                detected_gray_face, axis=0)
                            detected_gray_face /= 255

                            emotion_prediction = emotion_labels[np.argmax(
                                emotion_model.predict(detected_gray_face)[0])]

                            if gender_index == 0:
                                gender = "F"
                            else:
                                gender = "M"

                            # save picture to hard drive
                            classifier.save_picture(detected_10margin_face,
                                                    frame, apparent_age,
                                                    gender, emotion_prediction)

                            # Create an overlay text and put it into frame
                            cv2.rectangle(image, (x, y), (x + w, y + h),
                                          (255, 255, 0), 2)
                            overlay_text = "%s %s %s" % (gender, apparent_age,
                                                         emotion_prediction)
                            cv2.putText(image, overlay_text, (x, y),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 255, 255), 2, cv2.LINE_AA)
                        except Exception as e:
                            print("Skipping face due to exception:", e)

                self.show_image(image)
                # Mask with 0xFF (binary 11111111) to keep only the low byte
                # of the key code returned by waitKey.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        standby_image = cv2.imread("standby.jpg")
        self.show_image(standby_image)
        self.print_line("Classifying saved images, please wait...")
        classifier.classify_and_folder_faces()

        self.print_line("Generating report, please wait...")
        total_number_of_images, person_dictionary = \
            classifier.analyze_classified_folders()

        self.print_line(
            classifier.create_report(total_number_of_images,
                                     person_dictionary))
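The margin expansion above relies on a try/except and an emptiness check to handle boxes that leave the frame. The same idea with explicit clamping, as a sketch (expand_box is a hypothetical helper, not part of this code):

def expand_box(image, x, y, w, h, margin=30):
    """Grow a face box by `margin` percent per side, clamped to the image."""
    margin_x = int(w * margin / 100)
    margin_y = int(h * margin / 100)
    top, left = max(0, y - margin_y), max(0, x - margin_x)
    bottom = min(image.shape[0], y + h + margin_y)
    right = min(image.shape[1], x + w + margin_x)
    return image[top:bottom, left:right]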