Пример #1
0
def find_faces_and_label():
    """Detect faces in images/group.jpg, classify each face's identity with
    the CNN, draw a labelled green box per face, then save and display the
    annotated image.

    Side effects: writes output.jpg and opens an OpenCV window (blocks until
    a key is pressed).
    """
    from facial_expressions import predict_expression_svm

    img = cv2.imread('images/group.jpg')

    # face_recognition returns boxes as (top, right, bottom, left).
    faces = face_recognition.face_locations(img)

    # Crop every face region once; both classifiers consume the same crops.
    crops = [img[top:bottom, left:right] for top, right, bottom, left in faces]

    predictions = predict_cnn(images=crops)
    predictions_expressions = predict_expression_svm(crops)

    for (top, right, bottom, left), prediction, _expression in zip(
            faces, predictions, predictions_expressions):
        # NOTE(review): the expression prediction is computed but never
        # rendered — only the identity label is drawn; confirm intent.
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 3)
        cv2.putText(img,
                    "%s" % int(prediction), (left + 3, bottom - 4),
                    cv2.FONT_HERSHEY_DUPLEX,
                    2, (0, 170, 0),
                    lineType=cv2.LINE_AA)

    cv2.imwrite('output.jpg', img)
    # Show at half size so large group photos fit on screen.
    cv2.imshow('img', cv2.resize(img, None, fx=0.5, fy=0.5))
    cv2.waitKey()
    cv2.destroyAllWindows()
Пример #2
0
def extract_faces_from_groups():
    """Detect faces in every .jpg group image and write each face crop to
    the faces folder, named "<predicted-label>-<running-index>.jpg".

    Side effects: reads from group_images_location, writes crops to
    group_image_faces_location, prints the list of processed filenames.
    """
    group_images = [
        name for name in listdir(group_images_location)
        if name.endswith('jpg')
    ]
    print(group_images)

    # Running index across ALL images so crop filenames never collide.
    face_index = 1
    for group_image_name in group_images:
        image = cv2.imread(group_images_location + group_image_name)
        faces = get_faces(image, greyscale=False)

        for face in faces:
            # predict_cnn returns a pair here; only the first element
            # (the predicted label) is used for the filename.
            label, _ = predict_cnn(face)

            cv2.imwrite(
                '%s%s-%s.jpg' % (group_image_faces_location, label,
                                 str(face_index)), face)
            face_index += 1
Пример #3
0
def extract_faces_from_test_image():
    """
    Method to extract faces from single image with the predicted class as the name of the file.

    Side effects: reads images/group3.jpg and writes one crop per detected
    face into test_images/ as "<label>-<index>.jpg".
    :return: None
    """
    image = cv2.imread("images/group3.jpg")

    # Boxes come back as (top, right, bottom, left).
    faces = face_recognition.face_locations(image)

    for index, (top, right, bottom, left) in enumerate(faces):
        # Slice the face region once and reuse it for prediction and output.
        crop = image[top:bottom, left:right]

        label = predict_cnn(crop)[0]
        cv2.imwrite("test_images/%s-%s.jpg" % (label, index), crop)
Пример #4
0
def make_confusion_matrix():
    """
    Method to create the confusion matrix for the CNN.

    Classifies every image under <face_folders_location>/test/<individual>/
    and collects the parallel ground-truth/prediction label lists, using the
    folder name as the ground truth.

    :return: tuple (actual, predicted) of parallel label lists
    """
    predicted = []
    actual = []

    test_root = face_folders_location + "test/"
    individuals = listdir(test_root)

    for individual in individuals:
        individual_folder = test_root + individual + "/"
        filenames = listdir(individual_folder)

        for filename in filenames:
            image = cv2.imread(individual_folder + filename)
            # predict_cnn returns a sequence; its first entry is the label.
            prediction = predict_cnn(image)[0]
            predicted.append(prediction)
            # The enclosing folder name is the ground-truth label.
            actual.append(individual)

    print(predicted)
    print(actual)

    return actual, predicted
Пример #5
0
    text_train = get_cleaned_text('../data/test_text')
    text_test = get_cleaned_text('../data/training_text')
    w2v_model = Word2Vec(text_train + text_test,
                         size=vector_size,
                         window=5,
                         min_count=3,
                         workers=4,
                         iter=5)
    return w2v_model


if __name__ == '__main__':
    # Ground-truth class labels for the training and test documents.
    train_variants = pd.read_csv('../data/training_variants')
    test_variants = pd.read_csv('../data/test_variants')

    n_text = len(train_variants)

    # Load the pre-trained 50-dimensional word2vec model; retrain with
    # create_w2v() if the saved model is missing or stale.
    w2v_model = Word2Vec.load('w2v_tt50')

    fvec = get_feature_vector2('../data/training_text', train_variants,
                               n_text + 1)

    nn_model = cnn.create_model(fvec[:, :, :], train_variants, epochs=10)

    fvec_test = get_feature_vector2('../data/test_text', test_variants)
    cnn.predict_cnn(nn_model, fvec_test)
Пример #6
0
def RecogniseFace(image="", feature_type=None, classifier_name=None):
    """
    Locate every face in an image file and identify each one.

    :param image: path to image (relative or absolute)
    :param feature_type: HOG or SURF (required for SVM/KNN, ignored by CNN)
    :param classifier_name: CNN, SVM or KNN
    :return: list of (identity, x, y, emotion) int tuples — one per detected
        face, (x, y) being the centre of the face box — or None when any
        input is invalid.
    """
    # --- validate inputs -------------------------------------------------
    # The default image is "" so a falsy check is required; the previous
    # `image is None` test could never catch the empty default.
    if not image or classifier_name is None:
        print("Must provide an image and classifier name")
        return None

    classifier_name = classifier_name.upper()

    if not os.path.isfile(image):
        print("Image file could not be found")
        return None

    if classifier_name not in ['CNN', 'SVM', 'KNN']:
        print("Must provide a supported classified name: CNN or SVM or KNN")
        return None

    # feature_type is already uppercased here; no need to re-upper later.
    feature_type = feature_type.upper() if feature_type else ""

    if classifier_name in ('SVM', 'KNN') and feature_type not in ['HOG', 'SURF']:
        print('Must provide a supported feature type for %s: HOG or SURF'
              % classifier_name)
        return None

    # --- detect faces ----------------------------------------------------
    image_raw = cv2.imread(image)
    # Boxes come back as (top, right, bottom, left).
    face_positions = face_recognition.face_locations(image_raw)
    crops = [
        image_raw[top:bottom, left:right]
        for top, right, bottom, left in face_positions
    ]

    def _with_positions(identities, emotions):
        # Pair each identity/emotion with the centre of its face box.
        return [(int(identity), int((left + right) / 2),
                 int((top + bottom) / 2), int(emotion))
                for identity, (top, right, bottom, left), emotion
                in zip(identities, face_positions, emotions)]

    # --- classify --------------------------------------------------------
    if classifier_name == "CNN":
        identities = [predict_cnn(crop)[0] for crop in crops]
    elif classifier_name == "SVM":
        identities = predict_svm(crops, feature_type)
    else:  # KNN — only remaining valid option after validation above
        identities = predict_knn(crops, feature_type)

    emotions = predict_expression_svm(crops)

    return _with_positions(identities, emotions)