Example #1
def common_words_method(language_full, language):
    welcome = (
        "Welcome to the common words section. We are going to learn the %s translation of five common English words"
    ) % (language_full)
    print(welcome)

    # Pick five distinct line numbers at random; random.sample avoids the
    # duplicate indices that repeated randint calls could produce.
    random_indices = random.sample(range(1, 3001), 5)

    eng = []
    esp = []
    line_number = 1
    with open('data.txt', 'r') as f:
        for line in f:
            if line_number in random_indices:
                word = line.strip()
                eng.append(word)
                tran = translate_text_to_foreign(word, language)
                esp.append(tran)
                print(word + " -> " + tran)
                # Say the English word first, then its translation.
                TextToSpeech(word, "en").speech()
                TextToSpeech(tran, language).speech()
                print(" ")
            line_number += 1

    print("Finally, the words with their translations are:")
    for i in range(len(eng)):
        print(eng[i], esp[i])
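The helper expects the full language name for the welcome message and an ISO code for the translation and speech calls, so an invocation would look roughly like this (illustrative values, not from the original script):

# hypothetical call; "Spanish" / "es" are example values
common_words_method("Spanish", "es")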
Example #2
    def __init__(self, detect_model="data/andrew2.net",
                       lyrics_model="data/keras_model_1200.h5",
                       lyrics_chars="data/chars.pkl"):
        # microphone
        self.mic = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE)

        # wake word detector
        self.detector = TriggerDetector(detect_model)

        # speech and language services
        self.speech_client = SpeechToText()
        self.luis = LangUnderstand()
        self.tts = TextToSpeech()

        # lyrics generator model
        self.lyrics_gen = LyricsGenerator(lyrics_model, lyrics_chars)

        self.pred_queue = DetectQueue(maxlen=5)
        self.is_wakeup = False

        # pytft display
        self.tft = TFTDisplay()
        self.tft_queue = queue.Queue()
        self.tft_thread = threading.Thread(target=self.tft_manage, args=())
        self.tft_thread.daemon = True
        self.tft_thread.start()

        self.notify("hi_there")
Example #3
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = OneFaceOneObjectFake()

        results = [
            "",
            "I am 100 percent sure that I can see a snowboard  behind me on my left.",
            "Yes, I am 100 percent sure that I can see exactly one snowboard  behind me on my left.",
            "I am 84 percent sure that I can see will. Not the real one though. behind me on my left.",
            "Yes, I am 84 percent sure that I can see will. Not the real one though. behind me on my left.",
            "I am 100 percent confident that the snowboard belongs to Will. behind me on my left.",
            "",
            "I am 100 percent sure that the snowboard is here  behind me on my left.",
            "Yes, I am 84 percent sure that a fake will is here,  behind me on my left."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #4
    def start_rap(self):
        media = vlc.MediaPlayer("assets/beat.mp3")
        media.play()
        media.set_time(20000)  # jump 20 seconds (20000 ms) into the beat
        for i in self.player_raps[self.current_player]:
            TextToSpeech(i)
        media.stop()
Example #5
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = ThreeFacesThreeObjectsTwoSameOneDifferent()

        results = [
            "", "I can see 2 snowboards, and 1 sports ball.",
            "Yes, I can actually see more than one snowboard. I think there are 2.  ",
            "I can see will nico and eivinas.",
            "Yes, I am 84 percent sure that I can see will  behind me on my left.",
            "I think that the following people have a snowboard: Will and Eivinas.",
            "",
            "I can actually see more than one snowboard. I think there are 2. One of them is here.  ",
            "I am 84 percent sure that will is here  behind me on my left."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #6
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = ThreeFacesThreeObjectsThreeDifferent()

        results = [
            "", "I can see 1 sports ball, 1 snowboard, and 1 tennis racket.",
            "Yes, I am 100 percent sure that I can see exactly one snowboard  behind me on my left.",
            "I can see will nico and eivinas.",
            "Yes, I am 84 percent sure that I can see will  behind me on my left.",
            "I am 100 percent confident that the snowboard belongs to Will. behind me on my left.",
            "",
            "I am 100 percent sure that the snowboard is here  behind me on my left.",
            "I am 84 percent sure that will is here  behind me on my left."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #7
    def __init__(self):
        self.car = Vehicle()
        # Verify that the Vehicle object was created
        # print("Vehicle made!")
        # self.find_own_pos("1362060585", "1362060062")
        self.minute_warning = False
        self.second_warning = False
        self.meter_message = False
        self.map = MapView()
        self.text_to_speech = TextToSpeech()

    def greeting(self):
        try:
            chat_bot = ChatBot(None)
            response = chat_bot.send_message(self.first_message)
            output = response[0]
            self.con_id = response[1]
            self.browser.append("<p>Agent: " + output + "</p>")
            self.first_message = False
            TextToSpeech(output)
        except Exception as e:
            print(e)
Example #9
def send_text(message):
    if message.text.lower() == 'привет':  # "hi"
        bot.send_message(message.chat.id, 'Привет')  # "Hi"
    elif message.text.lower() == 'пока':  # "bye"
        bot.send_message(message.chat.id, 'Прощай, создатель')  # "Farewell, creator"
    elif message.text.lower() == 'я тебя люблю':  # "I love you"
        bot.send_sticker(message.chat.id, 'CAADAgADZgkAAnlc4gmfCor5YbYYRAI')
    else:
        tts = TextToSpeech(text=message.text, lang='ru')
        ogg_file = tts.save()
        with open(ogg_file, 'rb') as f:
            bot.send_voice(message.chat.id, f)
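In pyTelegramBotAPI a handler like send_text is registered with a decorator placed directly above the def and the bot is started with a polling loop; that wiring is not shown in the excerpt, but would presumably look like:

# assumed wiring for send_text (telebot / pyTelegramBotAPI); not in the excerpt
@bot.message_handler(content_types=['text'])
def send_text(message):
    ...  # body as defined above

bot.polling(none_stop=True)  # start Telegram long polling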
    def receive_message(self, message):
        try:
            chat_bot = ChatBot(message)
            if self.con_id is None:
                response = chat_bot.send_message(self.first_message)
            else:
                response = chat_bot.send_message(self.first_message,
                                                 self.con_id)
            output = response[0]
            self.con_id = response[1]
            self.browser.append("<p>Agent: " + output + "</p>")
            self.first_message = False
            TextToSpeech(output)
        except Exception as e:
            print(e)
    def __init__(self, config):

        # X. Initialize announcer.
        self.announcer = TextToSpeech()
        self.announcer.add_speech_text(
            "Initializing system. Please wait for a moment.")

        # X. TF broadcaster and listener
        self.broadcaster = tf.TransformBroadcaster()
        self.listener = tf.TransformListener()

        # Prepare shared object.
        self.data = SharedData()

        # Publishers & Subscribers
        self.pubs = Publishers()
        self.subs = Subscribers(self.announcer)

        # Store tf
        if (not config.pose_init):
            while (self.subs.odom.get_object() is None):
                time.sleep(0.5)
            odom = self.subs.odom.get_object()
            odom_xyzrpy = create_xyzrpy_from_pose(odom.pose.pose)
            pose_stamped = PoseStamped()
            pose_stamped.header.stamp = rospy.Time.now()
            pose_stamped.header.frame_id = 'map'
            pose_stamped.pose = create_pose_from_xyzrpy(config.xyzrpy)
            self.data.last_valid_ndt_pose = pose_stamped
            self.data.last_valid_tf_odom_to_map = create_transform_from_pose(
                config.xyzrpy, odom_xyzrpy)

        # Initialize Pose
        self.initialize_pose(config)

        # Start checking thread
        self.checking_thread = threading.Thread(target=self.run_checking)
        self.checking_thread.start()

        # Start control
        self.run_control()

        # Terminate thread.
        self.announcer.terminate()
        self.checking_thread.join()
    def execute(self, userdata):
        """
        Execute function called in the state machine

        Key arguments:
        userdata -- state machine userdata object being passed around

        """
        rospy.logdebug("Information")
        rospy.loginfo(self.output_msg + "\n")
        rospy.loginfo(userdata.recog_elements_in)

        # Prepare sentence and get information on the camera
        rospy.set_param(ACTIVE_CAMERA,
                        1)  # Default front camera (changed by audio request)
        text_to_speech = TextToSpeech(userdata.recog_elements_in,
                                      userdata.lang_code_in,
                                      userdata.action_in[1],
                                      userdata.action_in[2])
        self.output_msg = text_to_speech.callback()

        # Indicating to the Display Controller that the display should show what the robot thinks
        rospy.set_param(RECOG_UPPER_X, userdata.recog_elements_in.upper_x)
        rospy.set_param(RECOG_UPPER_Y, userdata.recog_elements_in.upper_y)
        rospy.set_param(RECOG_LOWER_X, userdata.recog_elements_in.lower_x)
        rospy.set_param(RECOG_LOWER_Y, userdata.recog_elements_in.lower_y)
        rospy.set_param(RECOG_LABEL, userdata.recog_elements_in.label)
        rospy.set_param(RECOG_CAMERA, userdata.recog_elements_in.camera)
        rospy.set_param(DISPLAY_INFORMATION, True)

        # Maintain the thread for the duration of the speech out
        audio_out_client.make_request(self.output_msg, userdata.lang_code_in)

        # Resetting Parameters
        rospy.set_param(RECOG_UPPER_X, [])
        rospy.set_param(RECOG_UPPER_Y, [])
        rospy.set_param(RECOG_LOWER_X, [])
        rospy.set_param(RECOG_LOWER_Y, [])
        rospy.set_param(RECOG_LABEL, [])
        rospy.set_param(RECOG_CAMERA, [])
        rospy.set_param(DISPLAY_INFORMATION, False)
        return 'turning'
Example #13
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = EmptyData()

        results = [
            "", "I can’t see any objects.", "No, I cannot see any snowboard.",
            "I can’t see anybody.", "No, I cannot see will.",
            "I don’t think anybody has a snowboard.", "",
            "I cannot see any snowboard.", "I cannot see will."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #14
    def __init__(self, config):
        """
        Initialize
        :param config: configuration
        :type config: Config
        """
        self.command_processor = CommandProcessor(
            self._command_handlers(config.command_handlers))
        self.robot = Robot(config.apiai.client_access_token,
                           config.apiai.language,
                           self.command_processor.commands)
        self.speech_to_text = SpeechToText(
            config.speechkit.key, "", config.speechkit.recognition.language)
        self.text_to_speech = TextToSpeech(
            config.speechkit.synthesis.cache_size, config.speechkit.key,
            config.speechkit.synthesis.language,
            config.speechkit.synthesis.speaker,
            config.speechkit.synthesis.emotion,
            config.speechkit.synthesis.speed)
        self.record = SpeechCapture(config.record.silence_calculation_chunks,
                                    config.record.speech_level_coefficient,
                                    config.record.start_wait_chunks,
                                    config.record.finish_wait_chunks)
Example #15
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = TwoFacesOneFakeOneReal()

        results = [
            "", "I can’t see any objects.", "No, I cannot see any snowboard.",
            "I can see nico and will. But at least one of them is fake.",
            "Yes, I am 84 percent sure that I can see will. Not the real one though. behind me on my left.",
            "I don’t think anybody has a snowboard.", "",
            "I cannot see any snowboard.",
            "Yes, I am 84 percent sure that a fake will is here,  behind me on my left."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #16
def predict(speech_recognition=False, speech_synthesis=False):
    ''' Interactive use of the trained seq2seq model.
    1. speech_recognition - enable speech recognition from a microphone via PocketSphinx
    2. speech_synthesis - enable spoken answers via RHVoice '''
    name_dataset = configure_file_names()

    ttt = TextToText(f_name_w2v_model=f_name_w2v_model,
                     f_name_model=f_name_model,
                     f_name_model_weights=f_name_model_weights)

    if speech_recognition:
        print('[i] Loading the language model for speech recognition...')
        stt = SpeechToText('from_microphone', name_dataset)

    if speech_synthesis:
        print('[i] Loading the speech synthesizer...')
        tts = TextToSpeech('anna')

    print()
    question = ''
    while (True):
        if speech_recognition:
            print('Listening...')
            question = stt.get()
            os.write(sys.stdout.fileno(), curses.tigetstr('cuu1'))
            print('You: ' + question)
        else:
            question = input('You: ')
        answer, lost_words = ttt.predict(question, True)
        print('\t=> %s' % answer)
        if len(lost_words) > 0:
            print('[w] Lost words: ' + ', '.join(lost_words) + '\n')
        else:
            print()
        if speech_synthesis:
            tts.get(answer)
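Based on the signature and docstring, the function is driven in one of two ways (hypothetical call sites; the surrounding script presumably maps these to command-line options):

# hypothetical call sites for predict()
predict()                                                # keyboard in, text out
predict(speech_recognition=True, speech_synthesis=True)  # PocketSphinx in, RHVoice out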
Example #17
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = OneFaceReal()

        results = [
            "", "I can’t see any objects.", "No, I cannot see any snowboard.",
            "I am 100 percent sure that I can see will  behind me on my left.",
            "Yes, I am 100 percent sure that I can see will  behind me on my left.",
            "I don’t think anybody has a snowboard.", "",
            "I cannot see any snowboard.",
            "I am 100 percent sure that will is here  behind me on my left."
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #18
    def test_makeRequest(self):
        face = ['will']
        object = ['snowboard']
        data = TwoFacesSameReal()

        results = [
            "", "I can’t see any objects.", "No, I cannot see any snowboard.",
            "I can see will and will.",
            "Yes, I can actually see will more than once. I knew I shouldn’t have had that last pint last night.  ",
            "I don’t think anybody has a snowboard.", "",
            "I cannot see any snowboard.",
            "Yes, I can actually see will more than once. I knew I shouldn’t have had that last pint last night. One of them is here.  "
        ]

        for id in range(1, 9):
            if id == 6:
                continue
            data.instruction_id = id
            text_to_speech = TextToSpeech(data,
                                          'en-EN',
                                          input_faces=face,
                                          input_objects=object)
            out = text_to_speech.callback()
            self.assertEqual(out, results[id])
Example #19
from text_to_speech import TextToSpeech

obj = TextToSpeech()
obj.text_input()
obj.create_gtts_object()
obj.speech_save()
obj.run()
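The four calls above imply a small gTTS-backed wrapper; here is a minimal sketch consistent with those method names (only the method names come from the driver, the internals are assumptions):

# sketch of the assumed text_to_speech module (gTTS hinted at by create_gtts_object)
from gtts import gTTS            # assumption: Google TTS backend
from playsound import playsound  # assumption: any local audio player would do


class TextToSpeech:
    def text_input(self):
        self.text = input("Enter the text to speak: ")

    def create_gtts_object(self):
        self.tts = gTTS(text=self.text, lang="en")

    def speech_save(self, path="speech.mp3"):
        self.path = path
        self.tts.save(path)

    def run(self):
        playsound(self.path)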
    def __init__(self):
        self.queue = Queue()
        self.alarm_running_queue = Queue()
        self.media = MultimmediaController()
        self.sched = Scheduler()
        self.tts = TextToSpeech()
        self.schedule = {}
        #self.gcal = GoogleCalendar()
        self.arduino = ArduinoController(0x08, self.queue)
        self.gpio = RPiGPIO()
        self.radio_db = RadioDB(
            "/home/pi/Python/NetRadioAlarmClockPi/radio-settings.db")
        self.webserver = webserver
        self.webserver.add_station_cb = self.add_station
        self.webserver.get_stations_cb = self.radio_db.get_stations
        self.webserver.set_current_station_cb = self.set_current_station
        self.webserver.get_current_station_cb = self.get_current_station
        self.webserver.update_station_name_cb = self.radio_db.update_station_title
        self.webserver.get_stream_playing_cb = self.media.get_playing
        self.webserver.play_stream_cb = self.start_stream
        self.webserver.stop_stream_cb = self.stop_stream
        self.webserver.set_volume_cb = self.media.set_volume
        self.webserver.get_schedule_cb = self.get_schedule
        self.webserver.save_schedule_cb = self.save_schedule
        self.webserver.alarm_running_queue = self.alarm_running_queue
        self.alarm_running = False
        self.update_interval = 60 * 5
        self.state = "idle"
        self.gpio.set_snooze_btn_callback(self.alarm_snooze_event)
        self.webdev = False
        self.stream_playing = False
        self.alarm_running_queue.put(False)
        self.last_snooze_time = None
        self.button_press_count = 0
        self.debouncing = False

        # Set current station
        url, station_title = self.get_current_station()
        self.media.set_stream_url(url)

        # Load radio alarm schedule and set up events if there are any
        self.load_schedule()
        self.create_events()

        # Test event
        test_time = datetime.datetime(2021, 2, 23, 20, 14, 0)
        self.sched.add_event(test_time, self.alarm_event)

        if len(sys.argv) > 1:
            #print(sys.argv)
            if sys.argv[1].replace("\r", "") == "webdev":
                print("Web development mode")
                self.webdev = True

        if not self.webdev:
            print("Resetting Arduino")
            self.gpio.reset_arduino()
            print("Done")

            self.arduino.set_vol_change_callback(self.media.set_volume)
            self.arduino.start_rot_enc_thread()
Example #21
def main():
    rospy.init_node("cyborg_audio")
    playback = Playback()
    text_to_speech = TextToSpeech()
    rospy.spin()
Example #22
def run(host, port, wsgi=False, https_mode=False):
    ''' Auto-select an available port (if port 0 was given), load the language model and the neural network, and start the server.
    1. wsgi - True: start a WSGI server, False: start the test Flask server
    2. https - True: run in https mode (the certificate and key must be in cert.pem and key.pem), False: run in http mode

    A self-signed certificate can be generated with: openssl req -x509 -newkey rsa:4096 -nodes -out temp/cert.pem -keyout temp/key.pem -days 365 '''

    if port == 0:  # if port 0 was given, auto-select any available port
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind((host, 0))
            port = sock.getsockname()[1]
            log('selected port ' + str(port))
            sock.close()
        except socket.gaierror:
            log('address ' + host + ':' + str(port) + ' is invalid',
                level='error')
            sock.close()
            return
        except OSError:
            log('address ' + host + ':' + str(port) + ' is unavailable',
                level='error')
            sock.close()
            return

    log('Flask v.' + flask_version + ', WSGIServer v.' + wsgi_version)
    log('maximum accepted request size set to: {:.2f} KB'.format(
        max_content_length / 1024))

    name_dataset = f_name_w2v_model_plays[
        f_name_w2v_model_plays.rfind('w2v_model_') +
        len('w2v_model_'):f_name_w2v_model_plays.rfind('.bin')]
    log('loading the seq2seq model trained on the ' + name_dataset +
        ' dataset...')
    global ttt
    print()
    ttt = TextToText(f_name_w2v_model=f_name_w2v_model_plays,
                     f_name_model=f_name_model_plays,
                     f_name_model_weights=f_name_model_weights_plays)
    print()

    log('loading the language model for speech recognition...')
    global stt
    stt = SpeechToText('from_file', name_dataset)

    log('loading the speech synthesizer...')
    global tts
    tts = TextToSpeech('anna')

    if wsgi:
        global http_server
        if https_mode:
            log('WSGI server started at https://' + host + ':' + str(port) +
                ' (press Ctrl+C or Ctrl+Z to exit)')
        else:
            log('WSGI server started at http://' + host + ':' + str(port) +
                ' (press Ctrl+C or Ctrl+Z to exit)')
        try:
            if https_mode:
                http_server = WSGIServer((host, port),
                                         app,
                                         log=app.logger,
                                         error_log=app.logger,
                                         keyfile='temp/key.pem',
                                         certfile='temp/cert.pem')
            else:
                http_server = WSGIServer((host, port),
                                         app,
                                         log=app.logger,
                                         error_log=app.logger)
            http_server.serve_forever()
        except OSError:
            print()
            log('address ' + host + ':' + str(port) + ' is unavailable',
                level='error')
    else:
        log('starting the test Flask server...')
        try:
            if https_mode:
                app.run(host=host,
                        port=port,
                        ssl_context=('temp/cert.pem', 'temp/key.pem'),
                        threaded=True,
                        debug=False)
            else:
                app.run(host=host, port=port, threaded=True, debug=False)
        except OSError:
            print()
            log('address ' + host + ':' + str(port) + ' is unavailable',
                level='error')
Example #23
print("language code is %s"%(language_to_be_learnt))
print("How do you want to learn today?")
print(" ")
selector=input(("Enter 1 for translation help. Enter 2 to see the most used words translated for you. Enter 3 for a fun quiz. Enter 4 to hear %s pronunciation of  specific words: ")%(language_full_name))

if(selector=="1"):
    fine_selector=input(("Enter 0 to translate from english to %s. Enter 1 to translate from %s to english: ")%(language_full_name,language_full_name))
    if(fine_selector=="0"):
        times=int(input("Number of sentences to be translated: "))
        for i in range(times):
            text=(input(("Enter text to be translated to %s: ")%(language_full_name)))

        
            translation =translate_text_to_foreign(text,language_to_be_learnt)
            print("The translation is %s"%(translation))
            speech_object = TextToSpeech(translation,language_to_be_learnt)
            speech_object.speech()
            print(" ")
            if(i==times-1):
                print("Thanks!!")
               

    else:
        times=int(input("Number of sentences to be translated: "))
        for i in range(times):
            text=(input("Enter text to be translated to english: "))

            #language_to_be_learnt="English"
            translation =translate_text_to_english(text,language_to_be_learnt)
            print("The translation is %s"%(translation))
            speech_object = TextToSpeech(translation,"en")
Example #24
def main():
    CHUNK = 1024
    CHANNELS = 1
    RATE = 16000
    FORMAT = 'S16LE'
    BYTE_RATE = 32000
    STATUS_KWS = 0
    STATUS_ASR = 1
    STATUS_TTS = 2
    IP = '203.113.152.90'
    IP = '10.30.154.10'

    URI = 'ws://{}:8892/client/ws/speech'.format(IP)
    content_type = "audio/x-raw, layout=(string)interleaved, rate=(int){}, format=(string){}, channels=(int){}".format(
        RATE, FORMAT, CHANNELS)
    IS_ACTIVE = False

    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    sr = None
    sentence = None
    kws = KeywordSpotting(graph='graph/conv_kws.pb',
                          labels='graph/conv_kws_labels.txt')
    tts = TextToSpeech(url='http://{}/hmm-stream/syn'.format(IP),
                       voices='doanngocle.htsvoice')
    brain = aiml.Kernel()
    brain.learn("std-startup.xml")
    brain.respond("load aiml b")

    status = STATUS_KWS
    print("===============================")
    print("[INFO] Waiting keyword [xin chào | chào bot | hi bot] ...")
    while True:
        block = stream.read(CHUNK)
        if status == STATUS_KWS:
            if kws.spotting(block):
                status = STATUS_ASR
                print("[INFO] Keyword detected! Start recognize ...")
                sr = SpeechRecognize(
                    url=URI + '?%s' %
                    (urllib.parse.urlencode([("content-type", content_type)])),
                    byterate=BYTE_RATE,
                    one_sentence=False)
                sr_response = sr.get_response_queue()
        elif status == STATUS_ASR:
            if sr.is_alive():
                sr.push_audio(block)
                while sr_response.qsize() > 0:
                    msg = sr_response.get()
                    if msg == "EOS":
                        print("\rHuman: {}".format(sentence))
                        text = brain.respond(sentence)
                        audio = tts.get_speech(text)
                        status = STATUS_TTS
                        tts.play_audio(audio)
                        status = STATUS_ASR
                        if "tạm biệt" in sentence:
                            sr.close()
                    else:
                        sentence = msg
                        print("\r-----: {}".format(msg), end='')

            else:
                status = STATUS_KWS
                print("===============================")
                print("========= GOOD BYE!!! =========")
                print("===============================")
                print("[INFO] Waiting keyword ...")
        elif status == STATUS_TTS:
            if sr.is_alive():
                sr.push_audio(bytearray(CHUNK))
            time.sleep(1. * CHUNK / BYTE_RATE)
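The excerpt ends inside the processing loop; run as a standalone script, it would presumably be launched with the usual entry-point guard (assumption, not shown above):

if __name__ == '__main__':
    main()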