Example No. 1
#Initialize the recognizer
r = sr.Recognizer()
 
#generate a list of all audio cards/microphones
mic_list = sr.Microphone.list_microphone_names()
 
#the following loop aims to set the device ID of the mic that
#we specifically want to use to avoid ambiguity.
for i, microphone_name in enumerate(mic_list):
    if microphone_name == mic_name:
        device_id = i
 
#use the microphone as the source for input. We also pass the device ID
#selected above; if the loop did not find a match, device_id will be
#undefined and a NameError will be raised here.
with sr.Microphone(device_index=device_id,
                   sample_rate=sample_rate,
                   chunk_size=chunk_size) as source:
    #wait for a second to let the recognizer adjust the 
    #energy threshold based on the surrounding noise level
    r.adjust_for_ambient_noise(source)
	
    data =  "Hi Aarun "
    ts.text_to_speech(data)
    print(data)
    #listens for the user's input
    audio = r.listen(source)
         
    try:
        text = r.recognize_google(audio)
        rs.check_response(text)
    except sr.UnknownValueError:
        print("Could not understand the audio")
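Example No. 1 depends on project helpers (ts, rs) and on mic_name, sample_rate and chunk_size being defined elsewhere. A minimal, self-contained sketch of the same device-selection pattern, with the microphone name and audio settings as assumed placeholders:

import speech_recognition as sr

MIC_NAME = "USB Audio Device"   # hypothetical device name; check list_microphone_names()
SAMPLE_RATE = 48000             # assumed values
CHUNK_SIZE = 2048

r = sr.Recognizer()

# Find the index of the microphone whose name matches MIC_NAME
device_id = None
for i, name in enumerate(sr.Microphone.list_microphone_names()):
    if name == MIC_NAME:
        device_id = i

if device_id is None:
    raise RuntimeError("Microphone '%s' not found" % MIC_NAME)

with sr.Microphone(device_index=device_id,
                   sample_rate=SAMPLE_RATE,
                   chunk_size=CHUNK_SIZE) as source:
    r.adjust_for_ambient_noise(source)  # calibrate the energy threshold to background noise
    print("Say something")
    audio = r.listen(source)

try:
    print("You said: " + r.recognize_google(audio))
except sr.UnknownValueError:
    print("Could not understand the audio")
except sr.RequestError as e:
    print("API request failed: {0}".format(e))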
Example No. 2

engine = pyttsx3.init()
engine.setProperty('rate', 120)

r = sr.Recognizer()

time = datetime.datetime.today().strftime("%H:%M:%S")

folder = []

helloMaster()

while True:
    try:
        with sr.Microphone(device_index=1) as source:
            print("Say something ...")
            audio = r.listen(source, timeout=1, phrase_time_limit=3)
    except sr.WaitTimeoutError:
        with sr.Microphone(device_index=1) as source:
            print("Say something ...")
            audio = r.listen(source, timeout=1, phrase_time_limit=3)

    try:

        voice = r.recognize_google(audio, language="ru-RU").lower()
        print("You say " + voice)

        if voice == "фак ю":
            bulling()
Example No. 3
def main():
    print("Welcome to our STT shop.")
    input_type = InputType.NONE
    action = phrase2action.Actions.NONE
    while action is not phrase2action.Actions.EXIT:
        if input_type is InputType.NONE:
            print(
                """Do you want to use stt for this action?(Y/n). You can later switch input method via "change input method" command"""
            )
            response = input()
            if 'y' in response.lower():
                input_type = InputType.SPEECH
            else:
                input_type = InputType.TEXT
        query = ''
        if input_type is InputType.SPEECH:
            r = sr.Recognizer()
            audio = None
            with sr.Microphone() as source:
                print("Speak now\n")
                audio = r.listen(source)
            try:
                query = r.recognize_google(audio)
                print(query)
            except (LookupError, sr.UnknownValueError):
                pass
        else:
            query = input()
        action = phrase2action.find_action(query)
        if action[1] == phrase2action.Actions.EXIT:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])
            exit()

        if action[1] == phrase2action.Actions.INP_CH:
            if input_type is InputType.SPEECH:
                input_type = InputType.TEXT
                print('input type switched to text')
            else:
                input_type = InputType.SPEECH
                print('input type switched to speech')

        if action[1] == phrase2action.Actions.GREET:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])

        if action[1] == phrase2action.Actions.WAR_PER:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])

        if action[1] == phrase2action.Actions.TIME_SHIP:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])

        if action[1] == phrase2action.Actions.NONE:
            print('What do you mean?')

        cnt = 0
        if action[1] == phrase2action.Actions.SHOW_GLASSES:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])
            print(
                "You can press the <- arrow if you don't like them or the -> arrow if you do. Simple Tinder-like control :)"
            )
            glass_templ = glasses_and_hats.load_resources('glass_resources')
            for t in glass_templ:
                response = glasses_and_hats.show_webcam(
                    t, glasses_and_hats.add_glasses, 0)
                if response == phrase2action.Actions.Y:
                    print(
                        'Nice choice! Would you like to proceed to checkout?')
                    payment()
                    break
                else:
                    cnt += 1
                if cnt == len(glass_templ):
                    print("For now we don't have more glasses options")
                    print('Would you like to get something else?')

        cnt = 0
        if action[1] == phrase2action.Actions.SHOW_HAT:
            print(phrase2action.reply[action[1]][random.randrange(
                len(phrase2action.reply[action[1]]))])
            print(
                "You can press the <- arrow if you don't like them or the -> arrow if you do. Simple Tinder-like control :)"
            )
            hat_templ = glasses_and_hats.load_resources('hat_resources')
            for t in hat_templ:
                response = glasses_and_hats.show_webcam(
                    t, glasses_and_hats.add_hat, 0)
                if response == phrase2action.Actions.Y:
                    print(
                        'Nice choice! Would you like to proceed to checkout?')
                    payment()
                    break
                else:
                    cnt += 1
                if cnt == len(hat_templ):
                    print("For now we don't have more hat options")
                    print('Would you like to get something else?')
Example No. 4
    decoder.end_utt()
    hypothesis = decoder.hyp()
    try:
        commands = hypothesis.hypstr.split()
        command, value = getCommand(hypothesis.hypstr)
        #ser.write(value)
        print(commands, command, value)
    except:
        print "nada reconocido"


print "creando objetos para el reconocimiento"
r = sr.Recognizer()
r.energy_threshold = 500  # minimum audio energy to consider for recording
r.pause_threshold = 0.5  # seconds of non-speaking audio before a phrase is considered complete
r.phrase_threshold = 0.2  # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
r.non_speaking_duration = 0.4  # seconds of non-speaking audio to keep on both sides of the recording

try:
    print "crea fuente"
    source = sr.Microphone()
    print "ajusta ruido"
    #r.adjust_for_ambient_noise(source, duration = 1)

    print "habla..."
    stop_listen = r.listen_in_background(source, callback)

    while True:
        pass
except:
    pass
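Example No. 4 wires listen_in_background to a callback that the snippet never shows (the decoder-processing block at the top is presumably its body). Below is a minimal sketch of the signature listen_in_background expects; recognize_sphinx stands in for the raw pocketsphinx decoder, and getCommand is the project helper already referenced above:

def callback(recognizer, audio):
    # Runs on a background thread for each phrase captured by listen_in_background
    try:
        hypothesis = recognizer.recognize_sphinx(audio)  # offline recognition
        command, value = getCommand(hypothesis)
        #ser.write(value)
        print(hypothesis, command, value)
    except sr.UnknownValueError:
        print("nada reconocido")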
Example No. 5
import speech_recognition as sr  # import the library

r = sr.Recognizer()  # initialize recognizer
with sr.Microphone() as source:  # the source can be a Microphone or an audio file
    print("Speak Anything :")
    audio = r.listen(source)  # listen to the source
    try:
        # use the recognizer to convert the captured audio into text
        text = r.recognize_google(audio)
        print("You said : {}".format(text))
    except (sr.UnknownValueError, sr.RequestError):
        print("Sorry, could not recognize your voice")
Example No. 6
def get_audio():
    print("I am listening...")
    playsound.playsound("Soft Beep Sound Effect.mp3")
    r = sr.Recognizer()
    with sr.Microphone() as source:
        audio = r.listen(source)
        said = ""

        try:

            print("processing sound...")
            said = r.recognize_google(audio, language="ro-RO")
            print(said)
            speak("Ai spus: " + said)

            #region Voice Commands
            if "timp" in said or "ceasul" in said:
                t = time.localtime()
                current_time = time.strftime("%H:%M", t)
                speak("Ora e " + current_time)
            if "data" in said:
                d = datetime.datetime.now().date()
                speak("Azi e :" + str(d))
            if "gluma" in said:
                speak("Cra cra crou loveste din nou")
            if "deschide" in said:
                if "Tim" in said:
                    speak("Deschid Tim")
                    os.startfile(r"C:\Users\Victor\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Microsoft Teams.lnk")
                if "discord" in said:
                    speak("Deschid discord")
                    os.startfile(r"C:\Users\Victor\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Hammer & Chisel, Inc\Discord.lnk")
                if "vis" in said:
                    speak("Deschid Visual studio code")
                    os.startfile(r"C:\Users\Victor\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Visual Studio Code\Visual Studio Code.lnk")
                if "Google" in said:
                    speak("Deschid google chrome")
                    os.startfile(r"C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Google Chrome.lnk")
            if "caută" in said or "cauta" in said:
                searchContent = ""
                if "caută" in said:
                    searchContent = said.__str__().replace("caută","")
                elif "cauta" in said:
                    searchContent = said.__str__().replace("cauta", "")
                driver = webdriver.Chrome(PATH)
                driver.get("https://www.google.com/")
                time.sleep(0.5)
                driver.switch_to.frame(0)
                driver.find_element_by_xpath("/html/body/div/c-wiz/div[2]/div/div/div/div/div[2]/form/div").click()
                searchBar = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div[1]/div[1]/div/div[2]/input")
                searchBar.send_keys(searchContent)
                time.sleep(1)
                pyautogui.press("enter")
            if "insluta" in said:
                speak(random.choice(insults))




            #endregion

            #region AskToContinue
            answer = input("Do you want to continue y/n :")
            answer = answer.lower()
            if (answer == "y"):
                print("Speak now")
                get_audio()
            elif (answer == "n"):
                print("Thanks for using this voice assistant")
                return
            #endregion
        except Exception as e:
            print("Exception: " + str(e))
            # region AskToContinue
            answer = input("Do you want to continue y/n :")
            answer = answer.lower()
            if (answer == "y"):
                print("Speak now")
                get_audio()
            elif (answer == "n"):
                print("Thanks for using this voice assistant")
                return
            # endregion

        return said
Example No. 7
File: Noela.py Project: Nootb/Noela
    def OnEnter(self, event):
        input = self.txt.GetValue()
        input = input.lower()

        #if input field is empty, try to get users speech
        if input == '':
            r = sr.Recognizer()
            with sr.Microphone() as source:
                audio = r.listen(source)
            try:
                self.txt.SetValue(r.recognize_google(audio))
                id.send('\x0D')
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print(
                    "Could not request results from Google Speech Recognition service;{0}"
                    .format(e))

        else:
            try:
                #wolframalpha:
                app_id = "YOUR WOLFRAMALPHA API"
                client = wolframalpha.Client(app_id)
                res = client.query(input)
                answer = next(res.results).text
                print(answer)
                say("The answer is '" + answer + "' ")
            except:
                print("Can't find the answer in wolframalpha, going to next")

            try:
                # Opens any file/app  in specified folder (e.g start Desktop\discord) (spacing not supported yet)
                if input.startswith('start'):
                    input = input.split(' ')  # ~ split input by spacing
                    input = " ".join(
                        input[1:])  # ~ join it, except for the first  word
                    print("Opening" + input)
                    os.system(r'start ' + 'C:\\Users\\leno\\' + input)
            except:
                print("No files found")

            try:
                if input == ('open wallpaper engine'):
                    print("Opening" + input)
                    subprocess.call([
                        'C:\\Program Files (x86)\\Steam\\steamapps\\common\\wallpaper_engine\\wallpaper32.exe'
                    ])
            except:
                print("Can't Open, wrong name")

            try:
                #wikipedia fallback:
                '''
                making the app more intuitive
                1. we take the input and divide every word into its own separate string
                2. after that, we take out the first two words of that string and leave
                the rest to be the input
                '''
                input = input.split(' ')  # ~ split input by spacing
                input = " ".join(input[2:])  # ~ join it, except for the first two words
                say("Searching for " + input)
                summary = wikipedia.summary(input)
                print(summary)
                say(" '" + summary + "' ")
            except:
                print("No results found")
Example No. 8
import speech_recognition as sr

r = sr.Recognizer()

# Using the microphone as the audio source
with sr.Microphone() as fonte:
    copia_arquivo = 1
    r.adjust_for_ambient_noise(fonte)  # adjust for ambient noise
    print('Diga alguma coisa: ')
    while True:
        audio = r.listen(fonte)  # capture speech

        # Save the captured audio to a wav file
        with open(f'voz_{copia_arquivo}.wav', 'wb') as f:
            f.write(audio.get_wav_data())
        copia_arquivo += 1
        try:
            # recognize Portuguese speech
            texto = r.recognize_google(audio, language='pt-BR')
            print('Você disse: ' + texto)
        except sr.UnknownValueError:
            print('Não foi possível compreender o que você disse')
        except sr.RequestError as e:
            print('Erro ao executar recognize_google, {0}'.format(e))
Example No. 9
import speech_recognition as sr

r = sr.Recognizer()

speech = sr.Microphone()

with speech as source:
    print("say something!!....")
    r.adjust_for_ambient_noise(source)  # calibrate to the ambient noise level
    audio = r.listen(source)

try:
    recog = r.recognize_wit(audio, key = "YOUR_KEY")
    print("You said: " + recog)
except sr.UnknownValueError:
    print("could not understand audio")
except sr.RequestError as e:
    print("Could not request results ; {0}".format(e))


Example No. 10
def micListen(r):
    mic = sr.Microphone()
    playsound("listen_noise.mov")
    with mic as source:
        print("\n---Listening...\n")
        return r.listen(source)
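micListen only captures and returns the raw AudioData; a short usage sketch, assuming the function above is available, the listen_noise.mov cue file is present, and Google's recognizer is the intended backend:

import speech_recognition as sr

r = sr.Recognizer()
audio = micListen(r)  # play the cue sound and capture one phrase
try:
    print("Heard: " + r.recognize_google(audio))
except sr.UnknownValueError:
    print("Could not understand the audio")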
Example No. 11
#!/usr/bin/python3
#import urllib.parse for encoding data from HTML
import urllib.parse
#import urllib.request for fetching urls
import urllib.request
#import re to match a particular string to the regular expression
import re
#webbrowser provides a high-level interface for displaying web documents to the user
import webbrowser
#speech_recognition for taking voice input
import speech_recognition as sr
r = sr.Recognizer()
mic = sr.Microphone(device_index=4)
with mic as source:
    r.adjust_for_ambient_noise(source)
    print("say somthing !!!")
    audio = r.listen(source, timeout=25)
text = r.recognize_google(audio)
print(text)

query = urllib.parse.urlencode({'search_query': str(text)})
print(query)

html_content = urllib.request.urlopen("http://www.youtube.com/results?" +
                                      query)
print(html_content)
url = "https://www.youtube.com/watch?v="
videos = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
webbrowser.open_new(url + videos[0])
Example No. 12
#generate a list of all audio cards/microphones
mic_list = sr.Microphone.list_microphone_names()
print(mic_list)

#the following loop aims to set the device ID of the mic that
#we specifically want to use to avoid ambiguity.
for i, microphone_name in enumerate(mic_list):
    if microphone_name == mic_name:
        device_id = i

#use the microphone as the source for input. We also pass the device ID
#selected above; if the loop did not find a match, device_id will be
#undefined and a NameError will be raised here.
with sr.Microphone(device_index=device_id,
                   sample_rate=sample_rate,
                   chunk_size=chunk_size) as source:
    #wait for a second to let the recognizer adjust the
    #energy threshold based on the surrounding noise level
    r.adjust_for_ambient_noise(source)
    print("Say Something")
    #listens for the user's input
    audio = r.listen(source, timeout=15)
    print("finished")
    try:
        text = r.recognize_google(audio, language='tr-TR')
        print("you said: " + text)

    #error occurs when google could not understand what was said

    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand the audio")
Example No. 13
def Voice():
    engineio = pyttsx3.init()
    voices = engineio.getProperty('voices')
    engineio.setProperty('rate', 130)
    engineio.setProperty('voice', voices[0].id)

    def speak(text):
        engineio.say(text)
        engineio.runAndWait()


    r = sr.Recognizer()
    #r.energy_threshold = 4000
    mic = sr.Microphone()

    while 1:

        with mic as source:
            print('Listening...')
            text = '...'
            speak(text)

            audio = r.listen(source)
            #r.adjust_for_ambient_noise(source, duration=0.5)
            #r.dynamic_energy_threshold = True        # listen to the source
            try:
                # use recognizer to convert our audio into text part.
                text = r.recognize_google(audio)
                print("You said : {}".format(text))
            except:
                # In case the voice was not recognized clearly, retry
                print("Sorry could not recognize your voice")
                speak("Could not understand")
                continue

        if text == 'exit':
            equation.set('Good Bye :D')
            root.update_idletasks()
            speak("Good Bye")
            exit()
            break
            #sys.exit("Thankyou !")
        elif text == 'clear':
            equation.set(" ")
            continue

        a = ''
        b = ''
        i = 0
        j = 0
        c = len(text)
        while text[j] != ' ':
            a = a + text[j]
            j = j + 1
        j = j + 1
        ch = text[j]
        if ch == 'd':
            while text[j] != 'y':
                j = j + 1
            #j=j+1
        j = j + 2
        while j != c:
            b = b + text[j]
            j = j + 1

        a = float(a)
        b = float(b)

        ans = 0
        if ch == '+':
            ans = a + b
            equation.set(ans)
            root.update_idletasks()

        elif ch == '-':
            ans = a - b
            equation.set(ans)
            root.update_idletasks()

        elif ch == 'x' or ch == 'X':
            ans = a * b
            equation.set(round(ans, 2))
            root.update_idletasks()

        elif ch == '/' or ch == 'd':
            if b != 0:
                ans = a / b
                equation.set(round(ans, 2))
                root.update_idletasks()
            else:
                speak("Not defined")
                ans = 0
                #equation.set(round(ans,2))
                root.update_idletasks()
        elif ch == '^':
            ans = math.pow(a, b)
            equation.set(ans)
            root.update_idletasks()
        else:
            ans = "Invalid Command"

        if ans == "Invalid Command":
            equation.set(ans)
            speak(ans)
        else:
            print(round(ans, 2))
            equation.set(ans)
            speak("The answer is {}".format(round(ans, 2)))
Example No. 14
def take_input(data): #takes audio input using pyaudio and speech recognition
    with sr.Microphone(sample_rate=16000, device_index=data['wake']['device_index']) as source: #16khz is the sample rate expected by models
        r.adjust_for_ambient_noise(source) #adjusting to background noise
        playsound("Speak_now_message", data) #tells user to speak now
        audio = r.listen(source)
    return audio #returns audio input
Example No. 15
import speech_recognition as sr
import webbrowser

print("Welcome")

r = sr.Recognizer()

with sr.Microphone() as source:  # open the default microphone and bind it to 'source'
    print("Start Saying, we are listening....")
    audio = r.listen(source)  # listen() captures the voice and stores it in 'audio'
    print("We got your requirement, please Wait!")

ch = r.recognize_google(audio)

if (("run" or "start" or "launch") and ("Linux" in ch)):
    webbrowser.open("http://Your IP Address/mypage.html")
elif (("exit" or "quit") in ch):
    exit()
else:
    print("Wrong choice")
Example No. 16
 def __init__(self):
     self.cm_threshold = 0
     self.mic = sr.Microphone()
     sr.Recognizer.__init__(self)
Example No. 17
                        browser.maximize_window()
                        browser.get('https://www.google.com')
                        search = browser.find_element_by_name("q")
                        search.send_keys(" ".join(word))
                        search.send_keys(Keys.RETURN)

            command.clear()

        else:
            print("Error reading command")
            command.clear()
    else:
        return p


with sr.Microphone() as source:
    bools = True
    while bools:
        try:
            bool = True
            print("Speak anything: ")
            r.pause_threshold = 1
            r.adjust_for_ambient_noise(source, duration=1)
            audio = r.listen(source)
            text = r.recognize_google(audio)
            print("You said: {}".format(text))
            if text == "OK Google":
                print("YES")
                print(bot_text + welcome)
                property_setting(140, 1)
                botReply.say(welcome)
Example No. 18
 def __init__(self):
     self.base_dir = os.path.dirname(os.path.abspath(__file__))
     self.res_dir = os.path.join(self.base_dir,
                                 'snowboy_resource/resources/')
     self.mic = sr.Microphone()
     sr.Recognizer.__init__(self)
Example No. 19
# -*- coding: utf-8 -*-
"""
Created on Fri Feb  8 19:37:52 2019

@author: Meet
"""
#code for converting speech into text
import serial
import speech_recognition as sr
import time

r = sr.Recognizer()
mic = sr.Microphone()

with mic as source:
    print("speak anything")
    r.adjust_for_ambient_noise(source)
    audio = r.listen(source)

    try:
        text = r.recognize_google(audio)
        transmit = text.lower()
        print('{}'.format(transmit))

    except:
        transmit = ''
        print('sorry we could not recognize your voice')

    # code for assigning character to the given speech input

if transmit in ('all led off', 'all led of', 'turn off all the led', 'turn of all the led',
                'turn off all the leds', 'turn of all the leds',
                'turn off all the lights', 'turn of all the lights'):
    transmit = 'a'
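Example No. 19 imports serial but is cut off before the assigned character is transmitted. A sketch of that final step, with the port name and baud rate as assumptions that must match the connected board:

import serial

ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)  # hypothetical port and baud rate
ser.write(transmit.encode('ascii'))  # send the single-character command chosen above
ser.close()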
Example No. 20
        sho = [
            'Вашего кота зовут Манник, вы вроде мне про него рассказывали ./Он вообще такой дебил, что написать про это можно целую книгу . Живёт он с вами, просит вечно жрачку, на что конечно-же уходит много денег .',
            'Могу сказать про него в переносном смысле - мужик который нихрена не делает и сидиит днями за телевизором . Чем-то похож на вас.',
            'Серая мышь, которая бесит своим поведением',
            'Довольно милая, но агрессивная зараза.'
        ]
        ka = random2.choice(sho)
        speak(ka)

    else:
        print('Команда не распознана, повторите!')


# startup
r = sr.Recognizer()
m = sr.Microphone(device_index=1)

speak_engine = pyttsx3.init()

with m as source:
    r.adjust_for_ambient_noise(source)

speak("Добрый день, хозяин")
speak("Ванилла слушает")

while True:
    with m as source:
        audio = r.listen(source)

    callback(r, audio)
    time.sleep(0.1)
Example No. 21
# Importing the necessary modules
import speech_recognition as spr
from googletrans import Translator
from gtts import gTTS
import os


# Creating Recogniser() class object
recog1 = spr.Recognizer()

# Creating microphone instance
mc = spr.Microphone()


# Capture Voice
with mc as source:
	print("Speak 'hello' to initiate the Translation !")
	print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
	recog1.adjust_for_ambient_noise(source, duration=0.2)
	audio = recog1.listen(source)
	MyText = recog1.recognize_google(audio)
	MyText = MyText.lower()

# The interaction is triggered by the word 'hello';
# whatever follows after it is then processed.
if 'hello' in MyText:

	# Translator method for translation
	translator = Translator()
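Example No. 21 stops right after the Translator object is created. A sketch of how the translation and speech output could continue, reusing MyText from above; the target language ('de') and output file name are arbitrary assumptions:

from googletrans import Translator
from gtts import gTTS
import os

translator = Translator()
translated = translator.translate(MyText, dest='de')  # MyText comes from the snippet above
print("Translated text: " + translated.text)

voice = gTTS(text=translated.text, lang='de', slow=False)
voice.save("translated_voice.mp3")
os.startfile("translated_voice.mp3")  # Windows-only; use another player elsewhere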
Example No. 22
    "what did you say"
]
SELECT_RESPONSES = ["select", "i want that one", "give me that"]

RECIPIE_WORDS = ["cook", "recipie", "kitchen"]

#### Light related #####
LIGHTS_WORDS = [
    "illuminate", "lights", "bright", "brighten", "light", "torches"
]
DARK_WORDS = [
    "darkness", "dark", "off", "darkness", "goodnight", "sweet dreams"
]

r = sr.Recognizer()
mic = sr.Microphone()  # decide the microphone based on input to the function


def listen(wait_timeout=None):
    # initialize
    # r1 = sr.Recognizer()
    mic1 = sr.Microphone()  # decide the microphone based on input to the function

    with mic1 as source:
        # r.adjust_for_ambient_noise(source) # if noisy
        try:
            audio = r.listen(source, timeout=wait_timeout)
            # extend with the all-results param to get other possible transcriptions
            return r.recognize_google(audio_data=audio)
        except (sr.WaitTimeoutError, sr.UnknownValueError):
            return None
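The keyword lists at the top of Example No. 22 imply that the text returned by listen() is routed to light-control actions. A minimal sketch of that routing step; the handlers are only indicated with prints because they are not part of the snippet:

def handle_phrase(phrase):
    if phrase is None:
        return
    words = phrase.lower().split()
    if any(w in LIGHTS_WORDS for w in words):
        print("-> lights on")        # a real handler would switch the lights on here
    elif any(w in DARK_WORDS for w in words):
        print("-> lights off")
    elif any(w in RECIPIE_WORDS for w in words):
        print("-> recipe mode")
    elif phrase.lower() in SELECT_RESPONSES:
        print("-> selection")

handle_phrase(listen(wait_timeout=5))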
Example No. 23
    except sr.UnknownValueError:
        # speech was unintelligible
        response["error"] = "Unable to recognize speech"

    return response


if __name__ == "__main__":
    # set the list of words, maxnumber of guesses, and prompt limit
    WORDS = ["apple", "banana", "grape", "orange", "mango", "lemon"]
    NUM_GUESSES = 3
    PROMPT_LIMIT = 5

    # create recognizer and mic instances
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()

    # get a random word from the list
    word = random.choice(WORDS)

    # format the instructions string
    instructions = (
        "I'm thinking of one of these words:\n"
        "{words}\n"
        "You have {n} tries to guess which one.\n"
    ).format(words=', '.join(WORDS), n=NUM_GUESSES)

    # show instructions and wait 3 seconds before starting the game
    print(instructions)
    time.sleep(3)
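Example No. 23 begins inside a helper that fills a response dictionary and is cut off above its definition. A sketch of what that helper plausibly looks like, keeping the 'error' field and message from the visible except branch; the other field names are assumptions:

def recognize_speech_from_mic(recognizer, microphone):
    # Transcribe speech from `microphone`, reporting API and intelligibility errors
    response = {"success": True, "error": None, "transcription": None}

    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    try:
        response["transcription"] = recognizer.recognize_google(audio)
    except sr.RequestError:
        # API was unreachable or unresponsive
        response["success"] = False
        response["error"] = "API unavailable"
    except sr.UnknownValueError:
        # speech was unintelligible
        response["error"] = "Unable to recognize speech"

    return response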
Example No. 24
input("Press enter to start.")

# Timestamped output file names, e.g. output/20190208T193752.txt
timestamp = datetime.datetime.now().isoformat().split('.')[0].replace('-', '').replace(':', '')
f = open('output/%s.txt' % timestamp, 'w', encoding='utf8')
falt = open('output/%s-alt.txt' % timestamp, 'w', encoding='utf8')

r = sr.Recognizer()
mic1 = sr.Microphone()

running = True

yaegiCount = 0
toFilter = '얘기'

history = ['.'] * 30

recThread1 = Thread(target=record1, daemon=True)

recThread1.start()
window()

f.close()
falt.close()
Example No. 25

def evaluateIntent(response_obj):
    intent = response_obj['result']['action']
    print('DEBUG: intent: ' + intent)

    if intent == "minimize":
        target_window = response_obj['result']['parameters']['target_window']
        print('DEBUG: target_window: ', target_window)
        target_monitor = response_obj['result']['parameters']['target_monitor']
        print('DEBUG: target_monitor: ', target_monitor)
        methods.minimize(target_window, target_monitor)
    elif intent == "create_shortcut":
        methods.createShortcut("")


# obtain audio from the microphone
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
    print("Say something: ")
    r.adjust_for_ambient_noise(source, duration=1)  # listen for 1 second to calibrate the energy threshold for ambient noise levels

# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
# `stop_listening` is now a function that, when called, stops background listening

# main loop
while True:
    time.sleep(0.1)
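Example No. 25's callback is not shown; listen_in_background calls it with the recognizer and the captured audio on a background thread. A sketch in the snippet's DEBUG-print style; the project's NLU request that would feed evaluateIntent() is only indicated in a comment, since it is not part of the snippet:

def callback(recognizer, audio):
    try:
        text = recognizer.recognize_google(audio)
        print('DEBUG: heard: ' + text)
        # The full project would send `text` to its NLU service and pass the
        # parsed response object to evaluateIntent(response_obj)
    except sr.UnknownValueError:
        print('DEBUG: could not understand audio')
    except sr.RequestError as e:
        print('DEBUG: recognition request failed; {0}'.format(e))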
Example No. 26
fr_pg = tk.Frame(window)  # a frame to hold a progress bar

home_page = tk.Frame(window, width=600, height=700)
home_page.grid(row=0, column=1, sticky="ns")

greet = tk.Label(master=home_page, image=welcome_img)

greet.grid(row=0, column=0, sticky="ew", padx=5, pady=60)
nxt_btn = tk.Button(master=home_page,
                    text="Next>>",
                    command=notepad,
                    bg="#00ffff")
nxt_btn.grid(row=1, column=0, sticky="ew", padx=5)

recognizer = sr.Recognizer()  # a speech recognizer object
microphone = sr.Microphone()  # a speech input device object

#pg_begin
pg = Progressbar(fr_pg, orient=tk.HORIZONTAL, length=700, mode='determinate')
pg.grid(row=0, column=0, padx=10, pady=10)
#pg_end

#btnBegin
btn_back = tk.Button(fr_buttons, text="<<Back", command=back, bg="#00ffff")
btn_open = tk.Button(fr_buttons, image=open_img, command=open_file)
btn_save = tk.Button(fr_buttons, command=save_file, image=save_img)
btn_speak = tk.Button(fr_buttons, image=mic_img, command=speech_to_text_helper)
btn_paint = tk.Button(fr_buttons, image=paint_img, command=choose_option)
btn_audioToTxt = tk.Button(fr_buttons,
                           text="Audio To Text",
                           command=lambda: Task(window, enviar))
Example No. 27
def record(rate=16000):
    r = sr.Recognizer()
    with sr.Microphone(sample_rate=rate) as source:
        audio = r.listen(source)
    with open(project_path + "\\record.wav", "wb") as f:
        f.write(audio.get_wav_data())
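record() in Example No. 27 only saves the captured phrase to record.wav. A short follow-up sketch that transcribes the saved file, assuming project_path is defined as in the project and points at a writable directory:

import os
import speech_recognition as sr

r = sr.Recognizer()
record()  # capture from the default microphone and write record.wav

with sr.AudioFile(os.path.join(project_path, "record.wav")) as source:
    saved_audio = r.record(source)  # load the file back as AudioData
try:
    print(r.recognize_google(saved_audio))
except sr.UnknownValueError:
    print("Could not understand the recording")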
Example No. 28
pyttsx3.speak("hi there you can talk with me")
print("hi there you can talk with me")
pyttsx3.speak('what do you want me to do?')


while True:
    variable = SRG.Recognizer()
    with SRG.Microphone() as source:
        print('what do you want me to do?')
        engine.runAndWait()
        
        audio_input = variable.record(source,duration=6)
        try:
            text = variable.recognize_google(audio_input)
            print(text)
        except:
            print(' Could not process audio...')
            pyttsx3.speak('Sorry Could not process audio')
            break
    if ('introduce' in text) or ('yourself' in text):
        print('menu')
        pyttsx3.speak("myself pika your voice assistant developed by priyanka just tell me what do you want me to do mam")
    
Example No. 29
from HIMU_MODULES import dataFilter as df
from HIMU_MODULES import dataMng as dm
import speech_recognition as sr

# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
    # sample the ambient noise for 2 seconds to set the energy threshold
    r.adjust_for_ambient_noise(source, duration=2)
    print("Connecting..")
    os.system("clear")
    print("Say something!")
    audio = r.listen(source)

#sant = "Hello  there I'm jay from Humbingo bring me 4 piz with extra toping"
sant = r.recognize_google(audio)  # transcribe the captured audio to text
userVoice = dm.makeList(sant)

FinalStat = "Ok, Now Just chill we will bring you " + str(
    df.filtNo(userVoice)) + " " + df.filtFood(
        userVoice) + " with " + df.filtExtra(userVoice)
print(FinalStat)
Example No. 30
def callback(recognizer, audio):  # this is called from the background thread
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        print("Google Speech Recognition thinks you said " +
              recognizer.recognize_google(audio))
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print(
            "Could not request results from Google Speech Recognition service; {0}"
            .format(e))


r = sr.Recognizer()
m = sr.Microphone(device_index=2, sample_rate=8000)
with m as source:
    r.energy_threshold = 500
#    r.adjust_for_ambient_noise(source) # we only need to calibrate once, before we start listening

# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
# `stop_listening` is now a function that, when called, stops background listening

# do some other computation for 5 seconds, then stop listening and keep doing other computations
import time
for _ in range(50):
    time.sleep(0.1)  # we're still listening even though the main thread is doing other things

stop_listening(wait_for_stop=False)  # stop the background listener