Example 1
def addn(save):
    """Ask the user (via speech) whether to save the captured face.

    Repeats the question until a recognizable yes/no answer is heard,
    then either saves or discards the face.

    Args:
        save: the cropped face image to persist if the user confirms.
    """
    ans = str(speech("Yes or No"))
    # Normalize so any capitalization/padding ("YES", " no ") is accepted;
    # the original matched only the exact strings "yes"/"Yes" and "no"/"No".
    # str() also makes a None result from speech() safe to normalize here.
    ans = ans.strip().lower()
    if ans == "yes":
        saveface(save)
    elif ans == "no":
        ignoreface()
    else:
        # Unintelligible (or failed) response — tell the user and re-ask.
        generate_sound("Could not understand your response Answer again")
        addn(save)
Example 2
def speech(abc):
    """Prompt the user with *abc*, record from the mic, and return the transcript.

    Uses Google's speech recognition service. Re-prompts on unintelligible
    audio and returns the retry's transcript; returns None on a
    connection/API error.

    Args:
        abc: prompt text spoken aloud before listening.

    Returns:
        The recognized transcript string, or None if the request failed.
    """
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Pls say something....")
        generate_sound(abc)
        audio = r.listen(source)

    # recognize speech
    try:
        # Call the recognizer once and reuse the result — the original
        # called recognize_google twice, doubling the network round-trip.
        text = r.recognize_google(audio)
        print("Google Audio:" + text)
        return text
    except sr.UnknownValueError:
        generate_sound("Could not understand your response Speak again")
        # BUG FIX: return the retry's result. The original dropped it,
        # so the function returned None even after a successful re-prompt.
        return speech(abc)
    except sr.RequestError as e:
        print("error: {0}".format(e))
        generate_sound("Connection Error")
Example 3
# Main webcam loop: continuously read frames, display them, and respond to
# keypresses ('p' = caption the scene aloud, 'f' = detect and identify faces).
# NOTE(review): this block is truncated in the visible source — the `try:`
# below has no visible `except`, and the final `if` has no body.
while True:
    ret, frame = cap.read()
    # NOTE(review): the Haar cascade is reloaded from disk on every frame;
    # confirm whether hoisting this above the loop is safe for this app.
    facedetect = cv.CascadeClassifier(r'haarcascade_frontalface_default.xml')
    if ret:
        # font = cv.FONT_HERSHEY_SIMPLEX
        cv.imshow("Video", frame)

        # 'p' key: snapshot the frame, generate a caption, and speak it.
        if cv.waitKey(5) == ord('p'):

            cv.imwrite('./test.jpg', frame)
            final_caption = p_part.generate_caption(
                './test.jpg')  # create caption
            final_caption = modcap(final_caption)  # remove tags
            print(final_caption)
            generate_sound(final_caption)  # convert to audio

        # 'f' key: run face detection on a grayscale copy of the frame.
        if cv.waitKey(5) == ord('f'):
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            faces = facedetect.detectMultiScale(gray, 1.3, 5)
            cv.imwrite('./test.jpg', frame)
            try:
                for x, y, w, h in faces:
                    #cv2.imwrite("dset//User."+str(user)+"."+str(sample)+".jpg",gray[y:y+h,x:x+w])
                    # Crop the detected face and overwrite test.jpg with it
                    # so the recognizer sees only this face.
                    save = frame[y:y+h, x:x+w]
                    cv.imwrite('./test.jpg', save)
                    dis, name = f_part.who_is_it('./test.jpg')  # distance, label
                    print(str(dis)+","+name)
                    temp = face_found_cap(name)
                    generate_sound(temp)
                    if(name == 'unknown'):
Example 4
def ignoreface():
    """Announce that the captured face was discarded (not saved)."""
    message = "Not saved"
    generate_sound(message)
Example 5
# Main webcam loop (variant of Example 3 with per-frame recognition counters):
# read frames, display them, and respond to keypresses ('p' = caption the
# scene aloud, 'f' = detect and identify faces).
# NOTE(review): this block is truncated in the visible source — the `try:`
# below has no visible `except`, and the loop body continues past L101.
while True:
    ret, frame = cap.read()
    # NOTE(review): the Haar cascade is reloaded from disk on every frame;
    # confirm whether hoisting this above the loop is safe for this app.
    facedetect = cv.CascadeClassifier(r'haarcascade_frontalface_default.xml')
    if ret:
        # font = cv.FONT_HERSHEY_SIMPLEX
        cv.imshow("Video", frame)

        # 'p' key: snapshot the frame, generate a caption, and speak it.
        if cv.waitKey(1) == ord('p'):

            cv.imwrite('./test.jpg', frame)
            final_caption = p_part.generate_caption(
                './test.jpg')  # create caption
            final_caption = modcap(final_caption)  # remove tags
            print(final_caption)
            generate_sound(final_caption)  # convert to audio

        # 'f' key: run face detection, then tally known/unknown matches.
        if cv.waitKey(1) == ord('f'):
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            faces = facedetect.detectMultiScale(gray, 1.3, 5)
            cv.imwrite('./test.jpg', frame)
            # Per-keypress accumulators for the recognition results below.
            known_detected = 0
            unknown_detected = 0
            known_face_list = []
            known_face_dist = []
            try:
                for x, y, w, h in faces:
                    #cv2.imwrite("dset//User."+str(user)+"."+str(sample)+".jpg",gray[y:y+h,x:x+w])
                    # Crop the detected face and overwrite test.jpg with it
                    # so the recognizer sees only this face.
                    save = frame[y:y + h, x:x + w]
                    cv.imwrite('./test.jpg', save)
                    dis, name = f_part.who_is_it('./test.jpg')  # distance, label