Example #1
def who_are_you():
    messages = [
        'I am Darong, your lovely personal assistant.',
        'Darong, didn't I tell you before?',
        'You ask that so many times! I am Darong.'
    ]
    tts(random.choice(messages))
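All of these examples call a tts() helper defined elsewhere in their projects. A minimal sketch of such a helper, assuming the espeak command-line tool is installed (the real projects may use a different speech backend), could look like this:

import subprocess

def tts(message):
    # Speak the given text aloud; espeak is just one possible backend.
    subprocess.call(['espeak', message])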
Example #2
def how_am_i():
    replies = [
        'You are goddamn handsome!', 'My knees go weak when I see you',
        'You look like the kindest person that I have met',
        'How come you ask such a stupid question?'
    ]
    tts(random.choice(replies))
Example #3
def tell_joke():
    jokes = [
        "What happens to a frog's car when it breaks down? It gets toad away.",
        'Why was six scared of seven? Because seven ate nine.',
        'No, I always forget the punch line.'
    ]
    tts(random.choice(jokes))
Example #4
def show_all_notes():
    conn = sqlite3.connect('memory.db')
    tts('Your notes are as follows: ')
    cursor = conn.execute("SELECT notes FROM notes")
    # the first "note" is one column of the database

    for row in cursor:
        print(row[0])
        tts(row[0])

    conn.close()
Example #5
def note_something(speech_text):
    conn = sqlite3.connect('memory.db')
    words_of_message = speech_text.split()
    words_of_message.remove('note')
    cleaned_message = ' '.join(words_of_message)
    conn.execute("INSERT INTO notes (notes, notes_date) VALUES (?, ?)", \
                 (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
    conn.commit()
    conn.close()

    tts('Your note has been saved')
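Examples #4 and #5 assume that memory.db already contains a notes table with notes and notes_date columns. A one-time setup sketch for that assumed schema might be:

import sqlite3

def create_notes_table():
    # Create the table used by show_all_notes() and note_something(), if it does not exist yet.
    conn = sqlite3.connect('memory.db')
    conn.execute('CREATE TABLE IF NOT EXISTS notes (notes TEXT, notes_date TEXT)')
    conn.commit()
    conn.close()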
Example #6
    def driver(self):
        tts("Hi. I'm Scarlett. How can I help you today?")

        recognized_audio = self.listen_audio()

        if "scarlett" or "Scarlett" in recognized_audio:
            tts("Yes {}, how can I help you?".format(name))

        recognized_audio = self.listen_audio()

        return recognized_audio
Example #7
def main():
    if os.path.isfile('profile.json'):
        profile = open('profile.json')
        profile_data = json.load(profile)
        profile.close()
    else:
        profile_populator()
        main()
        return

    tts('Welcome ' + profile_data['name'] +
        ', systems are now ready to run. How can I help you?')
    stt(profile_data)
Example #8
def connect_to_proxy(proxy_username, proxy_password):
    tts("Connecting to proxy server.")
    browser = webdriver.Firefox()
    browser.get('http://10.1.1.9:8090/httpclient.html')

    id_number = browser.find_element_by_name('username')
    password = browser.find_element_by_name('password')

    id_number.send_keys(proxy_username)
    password.send_keys(proxy_password)

    browser.find_element_by_name('btnSubmit').click()
Example #9
def main():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)

    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Melissa thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Melissa could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    else:
        tts(speech_text)
Example #10
def passiveListen():
    THRESHOLD_MULTIPLIER = 1.8
    RATE = 16000
    CHUNK = 1024
    THRESHOLD_TIME = 1
    LISTEN_TIME = 300

    stream = _audio.open(format=pyaudio.paInt16,
                         channels=1,
                         rate=RATE,
                         input=True,
                         frames_per_buffer=CHUNK)

    frames = []
    lastN = [i for i in range(30)]

    for i in range(0, RATE // CHUNK * THRESHOLD_TIME):
        data = stream.read(CHUNK)
        frames.append(data)

        lastN.pop(0)
        lastN.append(getScore(data))
        average = sum(lastN) / len(lastN)

    THRESHOLD = average * THRESHOLD_MULTIPLIER
    frames = []
    didDetect = False

    for i in range(0, RATE // CHUNK * LISTEN_TIME):
        data = stream.read(CHUNK)
        frames.append(data)
        score = getScore(data)

        if score > THRESHOLD:
            didDetect = True
            stream.stop_stream()
            stream.close()
            time.sleep(1)
            tts('Yes?')
            main()

    if not didDetect:
        print "No disturbance detected"
        stream.stop_stream()
        stream.close()
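passiveListen() relies on a getScore() helper that is not shown in this example; Examples #22 and #34 later in this listing define it as a scaled RMS loudness score. For reference, that helper is:

import audioop

def getScore(data):
    # Root-mean-square amplitude of the raw audio chunk, scaled down.
    rms = audioop.rms(data, 2)
    return rms / 3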
Example #11
File: main.py Project: ssi379/Melissa-Core
def passiveListen():
    THRESHOLD_MULTIPLIER = 1.8
    RATE = 16000
    CHUNK = 1024
    THRESHOLD_TIME = 1
    LISTEN_TIME = 300

    stream = _audio.open(format=pyaudio.paInt16,
                         channels=1,
                         rate=RATE,
                         input=True,
                         frames_per_buffer=CHUNK)

    frames = []
    lastN = [i for i in range(30)]

    for i in range(0, RATE // CHUNK * THRESHOLD_TIME):
        data = stream.read(CHUNK)
        frames.append(data)

        lastN.pop(0)
        lastN.append(getScore(data))
        average = sum(lastN) / len(lastN)

    THRESHOLD = average * THRESHOLD_MULTIPLIER
    frames = []
    didDetect = False

    for i in range(0, RATE // CHUNK * LISTEN_TIME):
        data = stream.read(CHUNK)
        frames.append(data)
        score = getScore(data)

        if score > THRESHOLD:
            didDetect = True
            stream.stop_stream()
            stream.close()
            time.sleep(1)
            tts('Yes?')
            main()

    if not didDetect:
        print "No disturbance detected"
        stream.stop_stream()
        stream.close()
Example #12
File: main.py Project: 19hunter19/Melissa
def main():

    # GET INPUT
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)

    # PRODUCE OUTPUT
    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        tts('You said, ' + speech_text)

    except sr.UnknownValueError:
        print('Audio not understood')

    except sr.RequestError as e:
        print('Could not request results from GSR service; {0}'.format(e))
Example #13
def stt():
    tts("Hi! I'm Scarlett. How can I help you today?")
    r = sr.Recognizer()

    with sr.Microphone() as source:
        audio = r.listen(source)

    try:
        recognized_speech = r.recognize_google(audio)

        # tts(recognized_speech)

        return recognized_speech

    except sr.UnknownValueError:
        print("Couldn't recognize audio")
    except sr.RequestError as e:
        print(
            "Couldn't request results from Google speech service : {}".format(
                e))
Example #14
def main():
    #obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)

    #recognize speech using WIT.AI
    WIT_AI_KEY = "YVRDNX5MC5L6DJRZYU44MOIUWS56JDRW"

    #WIT_AI_KEY = "3HJYXBPG6BZ3KFZG3KQHIOGAYD5OLWXK"

    try:
        speech_text = r.recognize_wit(audio, key=WIT_AI_KEY)
        print("Jarvis thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Jarvis could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Wit.ai service; {0}".format(e))
    else:
        tts(speech_text)
Example #15
def define_subject(speech_text):
    words_of_message = speech_text.split()
    words_of_message.remove('define')
    cleaned_message = ' '.join(words_of_message)

    try:
        wiki_data = wikipedia.summary(cleaned_message, sentences=5)
        regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)')
        m = regEx.match(wiki_data)
        while m:
            wiki_data = m.group(1) + m.group(2)
            m = regEx.match(wiki_data)

        wiki_data = wiki_data.replace("'", "")
        tts(wiki_data)
        print(wiki_data)

    except wikipedia.exceptions.DisambiguationError as e:
        tts('Can you please be more specific? You may choose something ' +
            'from the following.')
        print("Can you please be more specific? You may choose something " +
              "from the following; {0}".format(e))
Example #16
def main():
    profile = open('profile.yaml')
    profile_data = yaml.safe_load(profile)
    profile.close()

    r = sr.Recognizer()

    tts('Welcome ' + profile_data['name'] + ', systems are now ready to run. How can I help you?')

    if profile_data['stt'] == 'google':
        while True:
            with sr.Microphone() as source:
                r.adjust_for_ambient_noise(source)
                print("Say something!")
                audio = r.listen(source)

            try:
                speech_text = r.recognize_google(audio).lower().replace("'", "")
                print("Melissa thinks you said '" + speech_text + "'")
            except sr.UnknownValueError:
                print("Melissa could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
            else:
                brain(profile_data, speech_text)

    elif profile_data['stt'] == 'sphinx':

        def sphinx_stt():
            modeldir = profile_data['pocketsphinx']['modeldir']
            hmm = profile_data['pocketsphinx']['hmm']
            lm = profile_data['pocketsphinx']['lm']
            dic = profile_data['pocketsphinx']['dic']

            config = Decoder.default_config()
            config.set_string('-hmm', os.path.join(modeldir, hmm))
            config.set_string('-lm', os.path.join(modeldir, lm))
            config.set_string('-dict', os.path.join(modeldir, dic))
            config.set_string('-logfn', '/dev/null')
            decoder = Decoder(config)

            stream = open('recording.wav', 'rb')

            in_speech_bf = False
            decoder.start_utt()
            while True:
                buf = stream.read(1024)
                if buf:
                    decoder.process_raw(buf, False, False)
                    if decoder.get_in_speech() != in_speech_bf:
                        in_speech_bf = decoder.get_in_speech()
                        if not in_speech_bf:
                            decoder.end_utt()
                            speech_text = decoder.hyp().hypstr
                            print(speech_text)
                            decoder.start_utt()
                else:
                    break
            decoder.end_utt()
            return speech_text.lower().replace("'", "")

        while True:
            with sr.Microphone() as source:
                r.adjust_for_ambient_noise(source)
                print("Say something!")
                audio = r.listen(source)

            with open("recording.wav", "wb") as f:
                f.write(audio.get_wav_data())

            brain(profile_data, sphinx_stt())
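This main() reads its settings from profile.yaml. A minimal sketch of the fields it accesses (the values below are placeholders, not taken from the project) might be:

# profile.yaml (illustrative values only)
name: YourName
stt: google  # or 'sphinx'
pocketsphinx:
  modeldir: /usr/local/share/pocketsphinx/model
  hmm: en-us/en-us
  lm: en-us/en-us.lm.bin
  dic: en-us/cmudict-en-us.dict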
Example #17
import yaml
import speech_recognition as sr

from GreyMatter.SenseCells.tts import tts
from brain import brain

profile = open("profile.yaml.default")
profile_data = yaml.safe_load(profile)
profile.close()

#Variables
name = profile_data['name']
city_name = profile_data['city_name']
city_code = profile_data['city_code']

tts('Welcome ' + name + ', systems are ready to run')


def main():
    r = sr.Recognizer()
    with sr.Microphone(1) as source:
        print('Speak')
        audio = r.listen(source)
    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Vicky thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Vicky could not understand you")
    except sr.RequestError as e:
        print("Could not request results from Speech recognition service; {0}".
              format(e))
Example #18
def how_are_you():
    messages = ["I am dying inside. Thanks for asking.", "I am okay as long as I serve you.", "I'm fine, thank you."]
    tts(random.choice(messages))
Example #19
def who_am_i(name):
    tts("You are " + name + ", my brilliant creator. I worship you.")
Example #20
def open_firefox():
    tts("Openning Firefox")
    webdriver.Firefox()
Example #21
    elif speech_check(["how", "i", "look"]):
        general_conversation.how_am_i()
    elif speech_check(["what", "time", "now"]):
        time_teller.what_is_time()
    else:
        general_conversation.undefined()


profile = open('profile.yaml')
profile_data = yaml.safe_load(profile)
profile.close()

name = profile_data['name']
city_name = profile_data['city_name']

tts("Welcome " + name + ". The system is ready to serve you")


def main():
    r = sr.Recognizer()

    with sr.Microphone() as source:
        print("How can I help you?")
        audio = r.listen(source)

    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Scarlett thinks you said,' " + speech_text + " '")
        brain(speech_text)
    except sr.UnknownValueError:
        print("Scarlett could not understand audio !!")
Example #22
File: main.py Project: ssi379/Melissa-Core
profile_data = yaml.safe_load(profile)
profile.close()

# Functioning Variables
name = profile_data['name']
music_path = profile_data['music_path']
city_name = profile_data['city_name']
city_code = profile_data['city_code']
proxy_username = profile_data['proxy_username']
proxy_password = profile_data['proxy_password']
access_token = profile_data['twitter']['access_token']
access_token_secret = profile_data['twitter']['access_token_secret']
consumer_key = profile_data['twitter']['consumer_key']
consumer_secret = profile_data['twitter']['consumer_secret']

tts('Welcome ' + name + ', systems are now ready to run. How can I help you?')

# Thanks to Jasper for passive code snippet.

_audio = pyaudio.PyAudio()

def getScore(data):
    rms = audioop.rms(data, 2)
    score = rms / 3
    return score

def fetchThreshold():
    THRESHOLD_MULTIPLIER = 1.8
    RATE = 16000
    CHUNK = 1024
    THRESHOLD_TIME = 1
Example #23
render = web.template.render('templates/')

urls = (
    '/',
    'index',
)

profile = open('profile.yaml')
profile_data = yaml.safe_load(profile)
profile.close()

# Functioning Variables
name = profile_data['name']

tts('Welcome ' + name + ', systems are now ready to run. How can I help you?')


class index:
    def GET(self):
        return render.index()

    def POST(self):
        x = web.input(myfile={})
        filedir = os.getcwd() + '/uploads'  # change this to the directory you want to store the file in
        if 'myfile' in x:  # check that the file object was created
            filepath = x.myfile.filename.replace('\\', '/')  # replace Windows-style slashes with Unix ones
            filename = filepath.split(
Example #24
def undefine():
    tts('I do not know what that means!')
Example #25
def undefined():
    tts("I dont know what that means!")
Example #26
def how_are_you():
    tts('I am fine, thank you.')
Example #27
def where_born():
    tts('I was created by a magician named Frank, in China')
Example #28
def go_to_sleep():
    tts("Goodbye! Have a great day!")
    quit()
Example #29
def mirror():
    import json
    API_key = "o.0uXY4V8zygueZLziiZcaXA18ldTldaSl"
    url = "https://api.pushbullet.com/v2/pushes?limit=1"
    headers = {'Access-Token': API_key}

    # pb = Pushbullet(API_key)

    ws = websocket.create_connection(
        "wss://stream.pushbullet.com/websocket/{}".format(API_key))

    while True:
        result = ws.recv()

        # print(result)
        # print(type(result))
        # result = ast.literal_eval(result)
        result = json.loads(result)
        # print(result)
        pprint(result)
        print(type(result))

        if result["type"] == "tickle":
            r = requests.get(url, headers=headers)
            re = r.json()
            pprint(re)
            push = re['pushes']
            text = push[0]['body']
            name = push[0]['sender_name']
            tts("You have a new notification from {}. {} sent {}".format(
                name, name, text))

        elif result['type'] == 'push' and result['push'][
                'type'] != 'dismissal':
            push = result['push']
            application = push['application_name']
            text = push['body']
            sender = push['title']

            # Clear title
            no_msg = 0
            name = ''
            for i in range(len(sender)):
                if sender[i] == '(' or sender[i] == ':':
                    if sender[i] == ':':
                        no_msg = 1
                        break
                    else:
                        i += 1
                        try:
                            no_msg = eval(sender[i:i + 1])
                        except:
                            no_msg = eval(sender[i])
                        break
                else:
                    name += sender[i]
            if no_msg == 0:
                no_msg = 1
            print(name, no_msg, text, application)

            if 'call' in sender:
                # Incoming call handling
                by = push['body']
                msg = "You have an {} by {}".format(sender, by)
            else:
                # Incoming message handling. Tested for the following apps:
                # Whatsapp
                # Messenger
                msg = "You have {} new notification from {} by {}. {}".format(
                    no_msg, application, name, text)
            print(msg)
            tts(msg)
Example #30
def who_are_you():
    messages = ["I am Melissa, your slave for eternity.", "Melissa, did I not I tell you before?", "You imbecile. I am Melissa."]
    tts(random.choice(messages))
Example #31
    def __speak_(self, msg):
        tts(msg)
Example #32
def what_is_time():
    tts("The time is " + datetime.strftime(datetime.now(), '%H:%M:%S'))
Example #33
def who_am_i(name):
    tts('You are ' + name + ', a brilliant person. I love you!')
Example #34
# Functioning Variables
name = profile_data["name"]
music_path = profile_data["music_path"]
images_path = profile_data["images_path"]
city_name = profile_data["city_name"]
city_code = profile_data["city_code"]
proxy_username = profile_data["proxy_username"]
proxy_password = profile_data["proxy_password"]
access_token = profile_data["twitter"]["access_token"]
access_token_secret = profile_data["twitter"]["access_token_secret"]
consumer_key = profile_data["twitter"]["consumer_key"]
consumer_secret = profile_data["twitter"]["consumer_secret"]
client_id = profile_data["imgur"]["client_id"]
client_secret = profile_data["imgur"]["client_secret"]

tts("Welcome " + name + ", systems are now ready to run. How can I help you?")

# Thanks to Jasper for passive code snippet.

_audio = pyaudio.PyAudio()


def getScore(data):
    rms = audioop.rms(data, 2)
    score = rms / 3
    return score


def fetchThreshold():
    THRESHOLD_MULTIPLIER = 1.8
    RATE = 16000