Example #1
def extract_faces(emotions):
    print("Extracting faces...")
    for emotion in emotions:
        print("Processing %s data..." % emotion)
        images = glob.glob('../data/raw_emotion/%s/*.jpg' % emotion)
        for file_number, image in enumerate(images):
            frame = cv2.imread(image)
            faces = find_faces(frame)
            for face in faces:
                try:
                    cv2.imwrite(
                        "../data/emotion/%s/%s.jpg" %
                        (emotion, file_number + 1), face[0])
                except Exception:
                    print("Error in processing %s" % image)

    print("Face extraction finished")
Example #2
def analyze_picture(model_emotion, path, window_size, window_name='static'):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    image = cv2.imread(path, 1)
    #out = cv2.resize(image, (350, 350))
    for normalized_face, (x, y, w, h) in find_faces(image):
        emotion_prediction = model_emotion.predict(normalized_face)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.putText(image, emotions[emotion_prediction[0]], (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.imshow(window_name, image)
    key = cv2.waitKey(0)
    if key == ESC:
        cv2.destroyWindow(window_name)
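Several examples also rely on module-level names (WINDOW_NORMAL, ESC, emotions) defined elsewhere. A plausible set of definitions: ESC = 27 matches the "exit on ESC" check in Example #8, and the label list is copied from Example #14, so treat its exact ordering as an assumption.

import cv2

# Assumed module-level constants shared by the examples.
WINDOW_NORMAL = cv2.WINDOW_NORMAL
ESC = 27  # key code returned by cv2.waitKey() when Escape is pressed

# Must match the label order the emotion model was trained with.
emotions = ["happy", "sad", "angry", "surprised", "normal"]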
Example #3
def start_webcam(model_gender, window_size, window_name, update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                gender_prediction = model_gender.predict(normalized_face)
            if gender_prediction[0] == 0:
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)
                cv2.putText(webcam_image, 'Female', (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
            else:
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (255, 0, 0), 2)
                cv2.putText(webcam_image, 'Male', (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
        # delay += 1
        # delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
Example #4
def test_multi_scale(stride, thresh, show_plots=True):
    params = np.load("face_classifier_params.npy")
    multi_scale_scenes_dir = "../cos429_f17_assignment2_part4/face_data/testing_scenes"
    scene_filenames = glob.glob(multi_scale_scenes_dir + '/*.jpg')

    for scene_filename in scene_filenames:
        path = os.path.abspath(scene_filename)
        filename = os.path.basename(path)
        sys.stdout.write("Detecting faces in {}...\n".format(filename))
        sys.stdout.flush()
        img = imread(path, mode='L')
        out_img = find_faces(img, stride, thresh, params, 9, False)
        imsave(filename + '_out.jpg', out_img)
        if show_plots:
            plt.imshow(out_img, cmap='gray')
            plt.show()
            choice = input("Continue? (y/n): ")
            if choice == 'n':
                return
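Example #4 uses the old scipy.misc imread/imsave API (imread(path, mode='L') loads as 8-bit grayscale), which has since been removed from SciPy. A drop-in sketch built on imageio; this replacement is an assumption, not part of the original project:

import imageio.v2 as imageio
import numpy as np

def imread(path, mode=None):
    img = imageio.imread(path)
    if mode == 'L' and img.ndim == 3:
        # Luma conversion matching PIL's 'L' mode (ITU-R 601 weights).
        img = np.dot(img[..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)
    return img

def imsave(path, img):
    imageio.imwrite(path, img)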
Example #5
def start_webcam(model_emotion,
                 model_gender,
                 window_size,
                 window_name='live',
                 update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(
        "nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink"
    )
    # video_feed.set(3, width)
    # video_feed.set(4, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                emotion_prediction = model_emotion.predict(normalized_face)
                gender_prediction = model_gender.predict(normalized_face)
            if (gender_prediction[0] == 0):
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)
            else:
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (255, 0, 0), 2)
            cv2.putText(webcam_image, emotions[emotion_prediction[0]],
                        (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                        (255, 0, 0), 2)
        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
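Example #5 opens the camera through a GStreamer pipeline instead of a device index; the nvcamerasrc element targets the onboard camera of an NVIDIA Jetson board. A sketch that builds the same pipeline from parameters and falls back to a plain device, assuming an OpenCV build with GStreamer support:

import cv2

def open_jetson_camera(width=1280, height=720, fps=30, flip=0):
    # Same pipeline as Example #5, parameterized; requires GStreamer-enabled
    # OpenCV. Falls back to the default camera if the pipeline fails.
    pipeline = (
        "nvcamerasrc ! video/x-raw(memory:NVMM), "
        "width=(int){w}, height=(int){h}, format=(string)I420, "
        "framerate=(fraction){f}/1 ! nvvidconv flip-method={m} ! "
        "video/x-raw, format=(string)BGRx ! videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    ).format(w=width, h=height, f=fps, m=flip)
    feed = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
    return feed if feed.isOpened() else cv2.VideoCapture(0)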
Example #6
def extract_faces(names):
    print("Extracting faces...")
    for name in names:
        print("Processing %s data..." % name)
        images = glob.glob('face_dataset/%s/*.jpg' % name)
        for file_number, image in enumerate(images):
            frame = cv2.imread(image)
            faces = find_faces(frame)
            for face in faces:
                path = ('prepared_faces_dataset/%s/' % name)
                if not os.path.exists(path):
                    print("Creating data folder: %s" % path)
                    os.makedirs(path)
                filepath = path + '%s.jpg' % (file_number + 1)
                try:
                    cv2.imwrite(filepath, face[0])
                except Exception:
                    print("Error in processing %s" % image)
    print("Face extraction finished")
Example #7
def analyze_picture(path, window_size, window_name='static'):
    classifier = load_model('race_cnn_model.h5')
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
    image = cv2.imread(path, 1)
    for normalized_face, (x, y, w, h) in find_faces(image):
        # imgVector = reshape(normalized_face, (1, 128 * 128))
        # Keras predict() returns an array of class probabilities; take the
        # argmax as the predicted class and render it as text.
        prediction_probs = classifier.predict(normalized_face,
                                              batch_size=None, verbose=0,
                                              steps=None)[0]
        gender_prediction = int(np.argmax(prediction_probs))
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(image, str(gender_prediction), (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        print(gender_prediction)
        print(prediction_probs)
    cv2.imshow(window_name, image)
    key = cv2.waitKey(0)
    if key == ESC:
        cv2.destroyWindow(window_name)
Example #8
def show_webcam_and_run(model, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, web_cam_image = vc.read()
        cv2.imwrite("images/web.jpg", web_cam_image)
    else:
        print("web camera not found!")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(web_cam_image):
            numpy_image = img_to_array(normalized_face)
            cv2.imwrite("images/numpy.jpg", numpy_image)
            image_batch = np.expand_dims(numpy_image, axis=0)
            cv2.imwrite("images/batch.jpg", image_batch)
            prediction = model.predict(image_batch)  # do prediction
            print(prediction)

        cv2.imshow(window_name, web_cam_image)
        read_value, web_cam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
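A possible driver for Example #8; the model file name is an assumption, and the images/ directory must exist because the function writes debug frames into it:

import os
from keras.models import load_model

# Hypothetical entry point; 'emotion_model.h5' is an assumed file name.
if __name__ == '__main__':
    os.makedirs("images", exist_ok=True)  # the function writes debug frames here
    model = load_model("emotion_model.h5")
    show_webcam_and_run(model, window_size=(1280, 720), update_time=10)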
Example #9
def start_webcam(model_emotion,
                 window_size,
                 window_name='live',
                 update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                emotion_prediction = model_emotion.predict(normalized_face)
                test = emotion_prediction[0]
                # Speak the label only when the prediction is refreshed;
                # running TTS on every frame would block the capture loop.
                myobj = gTTS(text=emotions[test], lang='en', slow=False)
                myobj.save("output/text.mp3")
                os.system('mpg321 output/text.mp3')
            cv2.putText(webcam_image, emotions[test], (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)

        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
Example #10
def analyze_picture(path):
    classifier = load_model('./race_cnn_model.h5')
    result = []
    result_prob = []
    image = cv2.imread(path, 1)
    count = 0
    for normalized_face, (x, y, w, h) in find_faces(image):
        count = count + 1
        race_prediction = classifier.predict_proba(normalized_face, batch_size=32, verbose=0)[0]
        curr = race_prediction.tolist()
        result_prob.append(curr)
        curr_result = curr.index(max(curr))
        print(curr_result)
        # Map the predicted class index to a label and draw it on the image.
        race_labels = ["Hispanic", "Caucasian", "Asian", "African"]
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(image, race_labels[curr_result], (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        result.append(curr_result)
    print(result)
    cv2.imwrite(path, image)
    K.clear_session()
    return result, result_prob
Example #11
def start_webcam(eig_v, window_size, window_name='live', update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                imgVector = np.reshape(normalized_face, (1, 128 * 128))
                test = imgVector * eig_v
                testMale = np.load("./trained_result/255/male.npy")
                testFemale = np.load("./trained_result/255/female.npy")
                # lda_w: LDA projection vector (w holds the face width
                # unpacked from find_faces above).
                lda_w = np.load("./trained_result/255/lda_vector.npy")
                gender_prediction = fitLDA(test, lda_w, testMale, testFemale)
            if (gender_prediction == 0):
                #cv2.rectangle(webcam_image, (x,y), (x+w, y+h), (0,0,255), 2)
                cv2.putText(webcam_image, "male", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
            else:
                cv2.putText(webcam_image, "female", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2)
        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break
Example #12
def start_webcam(window_size, window_name='live', update_time=50):
    classifier = load_model('gender_cnn_model.h5')
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()
    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                # imgVector = reshape(normalized_face, (1, 128 * 128))
                gender_prediction = classifier.predict(normalized_face)
            if (gender_prediction >= 0.5):
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)
                cv2.putText(webcam_image, "male", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
            else:
                cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                              (0, 0, 255), 2)
                cv2.putText(webcam_image, "female", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2)
        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break
Example #13
def start_webcam(model_emotion,
                 window_size,
                 window_name='live',
                 update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            if init or delay == 0:
                init = False
                emotion_prediction = model_emotion.predict(normalized_face)
            # Draw on every frame, not only on the frames where the
            # prediction is refreshed.
            cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                          (255, 0, 0), 2)
            cv2.putText(webcam_image, emotions[emotion_prediction[0]],
                        (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                        (255, 0, 0), 2)
        delay += 1
        delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
Example #14
def main(lmain, camnum, videoname):
    global happy
    global sad
    global total
    global angry
    global suprised
    global normal
    global count
    global t
    global displaytext
    global timecount
    global check

    happy = 0.00
    sad = 0.00
    angry = 0.00
    suprised = 0.00
    normal = 0.00
    happydb = 0.00
    saddb = 0.00
    angrydb = 0.00
    supriseddb = 0.00
    normaldb = 0.00
    total = 0.00
    t = 0
    timecount = 0
    timecountsecond = 0
    timecountminutes = 0
    check = False

    emotions = ["happy", "sad", "angry", "surprised", "normal"]

    # Load model
    fisher_face_emotion = cv2.face.FisherFaceRecognizer_create()
    fisher_face_emotion.read('models/emotion_classifier_model.xml')

    count = 0

    trainer = Trainer()

    video_feed = cv2.VideoCapture(camnum)

    read_value, webcam_image = video_feed.read()

    init = True
    while read_value:
        face_index = 0
        timecountsecond = 0
        now = datetime.datetime.now()
        timecountsecond = now.second

        if timecount == 0:
            timecount = timecountsecond

        if timecountsecond == 0:
            timecount = 0

        if timecount == 58:
            timecount = -2

        elif timecount == 59:
            timecount = -1

        if timecount + 2 == timecountsecond:
            timecount = timecountsecond
            total = total + 1
            check = True

        read_value, webcam_image = video_feed.read()

        for normalized_face, (x, y, w, h) in find_faces(webcam_image):

            emotion_prediction = fisher_face_emotion.predict(normalized_face)
            cv2.rectangle(webcam_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            #cv2.putText(webcam_image, emotions[emotion_prediction[0]], (x,y-10), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)

            if emotion_prediction[0] == 0:
                happy = happy + 1

            elif emotion_prediction[0] == 1:
                sad = sad + 1

            elif emotion_prediction[0] == 2:
                angry = angry + 1

            elif emotion_prediction[0] == 3:
                suprised = suprised + 1

            elif emotion_prediction[0] == 4:
                normal = normal + 1

            if check == True:
                check = False

                if happy >= sad and happy >= angry and happy >= suprised and happy >= normal:
                    cv2.putText(webcam_image, "HAPPY", (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
                    happydb = happydb + 1

                elif sad >= happy and sad >= angry and sad >= suprised and sad >= normal:
                    cv2.putText(webcam_image, "SAD", (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
                    saddb = saddb + 1

                elif angry >= happy and angry >= sad and angry >= suprised and angry >= normal:
                    cv2.putText(webcam_image, "ANGRY", (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
                    angrydb = angrydb + 1

                elif suprised >= happy and suprised >= angry and suprised >= sad and suprised >= normal:
                    cv2.putText(webcam_image, "SURPRISED", (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
                    supriseddb = supriseddb + 1

                else:
                    cv2.putText(webcam_image, "NORMAL", (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 0), 2)
                    normaldb = normaldb + 1

                happy = 0.00
                sad = 0.00
                angry = 0.00
                suprised = 0.00
                normal = 0.00

        if total == 29:
            timecountminutes = timecountminutes + 1
            total = 0

            # increment counter
            face_index += 1
            count = count + 1

        if timecountminutes == 15:

            t = now.strftime("%H:%M:%S")

            with sqlite3.connect('database.db') as db:
                c = db.cursor()
                savetemp = 'INSERT INTO TEMP(PRODUCT_CODE,times,happy,sad,angry,suprised,normal) VALUES (?,?,?,?,?,?,?)'
                c.execute(
                    savetemp,
                    [lmain, t, happydb, saddb, angrydb, supriseddb, normaldb])
                db.commit()
            timecountminutes = 0
            count = 0.00
            happy = 0.00
            sad = 0.00
            angry = 0.00
            suprised = 0.00
            normal = 0.00
            happydb = 0.00
            saddb = 0.00
            angrydb = 0.00
            supriseddb = 0.00
            normaldb = 0.00

        # Display the resulting frame
        cv2.imshow(videoname, webcam_image)
        if cv2.waitKey(10) & 0xFF == 27:
            break

    video_feed.release()
    cv2.destroyAllWindows()
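The INSERT in Example #14 assumes a TEMP table already exists in database.db. A schema sketch inferred from the statement's column list; the column types are assumptions, since only the names appear in the code:

import sqlite3

# Inferred from the INSERT in Example #14; the types are guesses.
with sqlite3.connect('database.db') as db:
    db.execute("""
        CREATE TABLE IF NOT EXISTS TEMP (
            PRODUCT_CODE TEXT,
            times        TEXT,
            happy        REAL,
            sad          REAL,
            angry        REAL,
            suprised     REAL,  -- spelling matches the INSERT statement
            normal       REAL
        )
    """)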
Example #15
print("- Type 'q' to quit")
print("- Type 's' to save a screenshot")

while True:
    i = 0
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/2 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    for normalized_face, (x, y, w, h) in find_faces(small_frame):
        x *= 2
        y *= 2
        h *= 2
        w *= 2
        energy = 0

        face_prediction, confidence = lbph_face_name.predict(normalized_face)
        emotion_prediction = fisher_face_emotion.predict(normalized_face)
        if (len(energies) - 1 < i):
            energies.insert(i, starting_energy)
            dictionaries.insert(i, defaultdict(list))
            labels.insert(i, 0)
            scores.insert(i, 0)
        label, score, energy = cache_results(face_prediction, confidence,
                                             dictionaries[i], energies[i],
Example #16
    run_loop = True
    window_name = "Facifier Static (press ESC to exit)"
    # print("Default path is set to data/sample/")
    # print("Type q or quit to end program")
    # while run_loop:
    path = "../data/sample/"
    file_name = "test_m7s4rRh.png"
    # input("Specify image file: ")

    path += file_name
    model_emotion = fisher_face_emotion
    model_gender = fisher_face_gender
    window_size = (1280, 720)

    image = cv2.imread(path, 1)
    for normalized_face, (x, y, w, h) in find_faces(image):
        emotion_prediction = model_emotion.predict(normalized_face)
        gender_prediction = model_gender.predict(normalized_face)
        if (gender_prediction[0] == 0):
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        else:
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    print(emotion_prediction[0])

    # if file_name == "q" or file_name == "quit":
    # run_loop = False
    # else:
    # path += file_name
    # if os.path.isfile(path):
    #     analyze_picture(fisher_face_emotion, fisher_face_gender, path, window_size=(1280, 720), window_name=window_name)
    # else:
Example #17
def main():
    if flask.request.method == 'GET':
        # clear faces and uploads folder here
        face_not_found = False
        return flask.render_template('main.html')

    if flask.request.method == 'POST':
        sample_image = flask.request.args.get('sample_image')
        filename = None
        if sample_image:
            file = open(os.path.join(APP_ROOT, 'static', sample_image), "rb")
            # wrap file in werkzeug filestorage class to be compatible with our code below
            file = FileStorage(file,
                               content_type=('image/' +
                                             str(sample_image.split('.')[1])))
            filename = secure_filename(file.filename)
            filename = filename.split('static_')[1]
        else:
            file = flask.request.files['file']
            filename = secure_filename(file.filename)

        if file:
            original_image_path = os.path.join(app.config['UPLOAD_FOLDER'],
                                               filename)
            file.save(
                original_image_path
            )  # saving original image before we resize, to display later

            if os.name == 'nt':
                original_image_path = original_image_path.replace("\\", "/")

            original_image_path = 'static' + original_image_path.split(
                'static')[1]  # only keep path after 'static'

            faces = find_faces(
                original_image_path,
                FACES_FOLDER)  # returns a list of Faces from image
            gender_prediction = None

            if len(faces) > 0:
                for face in faces:
                    img = skimage.io.imread(
                        face.image_path
                    )  # face that has been cropped from original image
                    img = skimage.color.rgb2gray(img)  # convert to grayscale
                    img = skimage.transform.resize(
                        image=img,
                        output_shape=(48,
                                      48))  # downsize to size of our dataset
                    mod_image_path = (
                        os.path.splitext(original_image_path)[0] +
                        '_downscaled' +
                        os.path.splitext(original_image_path)[1])
                    skimage.io.imsave(
                        mod_image_path, img
                    )  # save our modified file that we use with our model

                    #tf_img = img.reshape(1, 48, 48, -1)

                    img = img.ravel()  # flatten 48*48 array to 1x(48*48)
                    img = img * 255  # our sk-learn model expects int value for each pixel 0 - 255
                    img = img.astype(int)

                    gender_prediction = gender_classifier.predict([img])
                    if gender_prediction[0] == 0:
                        face.gender = 'Male'
                    elif gender_prediction[0] == 1:
                        face.gender = 'Female'
                    else:
                        face.gender = None  # error

                    age_pred = age_predictor.predict([img])
                    #age_pred = tf.argmax(age_pred, axis=-1)
                    face.age = create_model.get_age_range(age_pred)

                    if os.name == 'nt':
                        face.image_path = face.image_path.replace("\\", "/")

                    face_found = 'static' + face.image_path.split('static')[
                        1]  # only keep path after 'static'
                    face.image_path = face_found  # easier format to print in our html page

                return flask.render_template(
                    'classify_image.html',
                    faces=faces,
                    original_image=original_image_path)

            else:
                # file uploaded, but no face found in image
                return flask.render_template('main.html', face_not_found=True)

    return flask.render_template('main.html')
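Example #17 is a Flask view; the scaffolding around it (app object, route registration, upload paths, the loaded classifiers) is not shown. A hedged sketch of what main() appears to assume; every name below is inferred from usage, not taken from the source:

import os
import flask

# Assumed module-level setup for the view above.
app = flask.Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
app.config['UPLOAD_FOLDER'] = os.path.join(APP_ROOT, 'static', 'uploads')
FACES_FOLDER = os.path.join(APP_ROOT, 'static', 'faces')

# gender_classifier and age_predictor are assumed to be pre-loaded
# scikit-learn models; loading them is project-specific and omitted here.
app.add_url_rule('/', 'main', view_func=main, methods=['GET', 'POST'])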
Example #18
def start_webcam(model_emotion,
                 model_gender,
                 window_size,
                 window_name='live',
                 update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # width/height are only defined when a window_size was provided.
        video_feed.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_feed.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True
    # Parse CLI arguments and build the dlib detector and landmark
    # predictor once, before the capture loop, instead of on every frame.
    ap = argparse.ArgumentParser()
    ap.add_argument("-p",
                    "--shape-predictor",
                    required=True,
                    help="path to facial landmark predictor")
    args = vars(ap.parse_args())
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            rects = detector(webcam_image, 1)
            for (i, rect) in enumerate(rects):
                shape = predictor(webcam_image, rect)
                shape = face_utils.shape_to_np(shape)
                output = face_utils.visualize_facial_landmarks(
                    webcam_image, shape)
                #if init or delay == 0:
                #init = False
                emotion_prediction = model_emotion.predict(normalized_face)
                gender_prediction = model_gender.predict(normalized_face)
                if (gender_prediction[0] == 0):
                    cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)
                    cv2.imshow('video', output)
                    cv2.putText(webcam_image, "Gender predictor: Female",
                                (x, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 255, 255), 2)
                else:
                    cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                                  (255, 0, 0), 2)
                    cv2.imshow('video', output)
                    cv2.putText(webcam_image, "Gender predictor: Male",
                                (x, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 255, 255), 2)

                cv2.putText(webcam_image, emotions[emotion_prediction[0]],
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                            (255, 0, 0), 2)
        #delay += 1
        #delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
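Example #18 expects a dlib landmark model on the command line. A hedged driver sketch; the emotion model path follows Example #14's convention and the gender model path is an assumption:

import cv2

# Hypothetical entry point. Run as, e.g.:
#   python webcam_landmarks.py -p shape_predictor_68_face_landmarks.dat
if __name__ == '__main__':
    fisher_face_emotion = cv2.face.FisherFaceRecognizer_create()
    fisher_face_emotion.read('models/emotion_classifier_model.xml')
    fisher_face_gender = cv2.face.FisherFaceRecognizer_create()
    fisher_face_gender.read('models/gender_classifier_model.xml')  # assumed path
    start_webcam(fisher_face_emotion, fisher_face_gender,
                 window_size=(1280, 720))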