コード例 #1
0
def turnOnLED():
    """Turn on the skin-therapy LED and broadcast the new state.

    Side effects: speaks a start prompt via TTS, drives the blue LED
    GPIO high, publishes the state to remote listeners, and sets the
    module-level ``ledState`` flag to True.
    """
    global ledState
    print('turnOnLED')
    # TTS prompt (Korean): "Starting skin improvement."
    tts.text_to_speech('피부 개선을 시작 합니다')
    led_blue.write_value(1)      # drive the blue LED GPIO high
    publish_led_state('true')    # remote side expects the string payload 'true'
    ledState = True
コード例 #2
0
ファイル: design.py プロジェクト: dibyendu/Plexicon
    def __play(self, text, url=""):
        """Fetch or synthesize speech audio for *text* and play it.

        When offline, a canned error sound (ERROR_PATH) is played
        instead. When *url* is given, its content is streamed as the
        audio; otherwise *text* is synthesized via the tts module.

        NOTE(review): uses Python 2's ``urllib.urlopen`` — this code
        targets Python 2 (``urllib.request`` would be needed on 3).
        """

        import urllib

        self.__block()
        try:
            # Cheap connectivity probe; raises IOError when offline.
            urllib.urlopen("http://www.google.co.in/")
        except IOError:
            from path import ERROR_PATH

            # Offline: fall back to the pre-recorded error audio.
            data = open(ERROR_PATH).read()
        else:
            if url:
                # Caller supplied a ready-made audio URL; use it as-is.
                data = urllib.urlopen(url).read()
            else:
                import re, tts

                # Strip characters the TTS backend cannot handle.
                text = re.sub("[^a-zA-Z0-9 \t.,;'\":(){}\[\]%!?/]", "", text)
                data = ""
                # Synthesize in 100-character chunks — presumably the TTS
                # service limits request length; TODO confirm.
                while len(text) > 100:
                    data += tts.text_to_speech(text[:100])
                    text = text[100:]
                data += tts.text_to_speech(text)
        finally:
            # NOTE(review): if an exception other than IOError escapes the
            # try body, `data` is unbound here and this raises NameError.
            sound_file_path = HOME_PATH + "/.plexicon/data/sound.mp3"
            sound_file = open(sound_file_path, "wb")
            sound_file.write(data)
            sound_file.close()
            import sound

            sound.play_sound_file(sound_file_path)
            self.__un_block()
        return False
コード例 #3
0
def notify(result, args):
    """Report a speed-test result through the enabled channels.

    Parameters:
        result: mapping with 'downloadSpeed' and 'uploadSpeed' values (MBPS).
        args: parsed CLI namespace with ``nospecial``, ``novoice`` and
            ``notoast`` opt-out flags.
    """
    # Channels default to on; the CLI flags are opt-outs, so special,
    # voice and toast are True unless the corresponding flag was passed.
    special = not args.nospecial
    voice = not args.novoice
    toast = not args.notoast

    if special:
        special_notification(result, args)

    download_speed = str(result['downloadSpeed'])
    upload_speed = str(result['uploadSpeed'])
    description = f'Download Speed: {download_speed} MBPS\nUpload Speed: {upload_speed} MBPS'

    title = 'Internet Speed'

    # While in fullscreen mode, notification doesn't work in Windows,
    # so optionally speak the result aloud as well.
    if voice:
        text_to_speech(description)

    # Show as a Windows 10 toast notification.
    if toast:
        toaster = ToastNotifier()
        toaster.show_toast(title, description)

    # Print in the console.
    log(f'\nTime: {date_time()}\n{description}')
コード例 #4
0
def recognize():
    """Listen once on the microphone and act on the recognized command.

    Recognizes speech via Wit.ai, parses it for a language and an
    objective, and either runs the documentation search (when both were
    understood) or asks the user to repeat (when only part was).
    Recognition/network failures are reported and swallowed.
    """
    adjust_mic()
    config.current_line = ""
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source, phrase_time_limit=7)

    try:
        config.current_line = r.recognize_wit(audio, key=WIT_AI_KEY)
        parse.parse()
        # Fix: "I thinks" -> "I think" (matches the wording used by the
        # background-listener callback elsewhere in this module).
        print("I think you said " + config.current_line)
        if "" not in (config.current_lang, config.current_objective):
            # Both a language and a topic were parsed: run the search and
            # reset the conversation state.
            tts.text_to_speech(
                f"Pulling up {config.current_objective} in the {config.current_lang} documentation"
            )
            searchdoc.search()
            config.current_objective = ""
            config.current_lang = ""
            config.is_active = False
        if config.current_line != "" and (
            config.current_lang != "" or config.current_objective != ""
        ):
            # Only half the request was understood; ask the user to retry.
            tts.text_to_speech("Could you repeat that?")
    except sr.UnknownValueError:
        print("Doc could not understand audio")
    except sr.RequestError as e:
        print(f"Could not request results from Doc; {e}")
コード例 #5
0
ファイル: design.py プロジェクト: PiyushGoyal443/Plexicon
 def __play(self, text, url=''):
     """Fetch or synthesize speech audio for *text* and play it.

     When offline, a canned error sound (ERROR_PATH) is played instead.
     When *url* is given, its content is streamed as the audio;
     otherwise *text* is synthesized via the tts module.

     NOTE(review): uses Python 2's ``urllib.urlopen`` — this code
     targets Python 2 (``urllib.request`` would be needed on 3).
     """
     import urllib
     self.__block()
     try:
         # Cheap connectivity probe; raises IOError when offline.
         urllib.urlopen('http://www.google.co.in/')
     except IOError:
         from path import ERROR_PATH
         # Offline: fall back to the pre-recorded error audio.
         data = open(ERROR_PATH).read()
     else:
         if url:
             # Caller supplied a ready-made audio URL; use it as-is.
             data = urllib.urlopen(url).read()
         else:
             import re, tts
             # Strip characters the TTS backend cannot handle.
             text = re.sub('[^a-zA-Z0-9 \t.,;\'":(){}\[\]%!?/]', '', text)
             data = ''
             # Synthesize in 100-character chunks — presumably the TTS
             # service limits request length; TODO confirm.
             while len(text) > 100:
                 data += tts.text_to_speech(text[:100])
                 text = text[100:]
             data += tts.text_to_speech(text)
     finally:
         # NOTE(review): if an exception other than IOError escapes the
         # try body, `data` is unbound here and this raises NameError.
         sound_file_path = HOME_PATH + '/.plexicon/data/sound.mp3'
         sound_file = open(sound_file_path, 'wb')
         sound_file.write(data)
         sound_file.close()
         import sound
         sound.play_sound_file(sound_file_path)
         self.__un_block()
     return False
コード例 #6
0
async def tts(context):
    """Speak the text of a TTS command and reply with the audio file.

    The text is everything after the 7-character command prefix; the
    synthesized audio is written to 'ttsFile.mp3' and sent back as an
    attachment named 'TTS.mp3'. Any failure is printed and swallowed.
    """
    try:
        spoken_text = context.message.content[7:]
        text_to_speech(spoken_text)
        with open('ttsFile.mp3', 'rb') as audio:
            await context.send(file=File(audio, 'TTS.mp3'))
    except Exception as error:
        print(error)
コード例 #7
0
ファイル: app.py プロジェクト: JaesungBae/flask_playground
def homepage():
    """Render the TTS demo page; on POST, synthesize the submitted text.

    POST requests carry a 'speech' text field and a 'voices' gender
    selection, which are handed to text_to_speech before re-rendering.
    """
    if request.method == 'POST':
        text = request.form['speech']
        gender = request.form['voices']
        text_to_speech(text, gender)
    # GET and POST both end on the same demo page (the original
    # duplicated this return in each branch).
    return render_template('tts_example.html')
コード例 #8
0
def startAnalysis():
    """Run one skin-analysis pass unless the therapy LED is active.

    Speaks start/finish prompts via TTS and records an analysis history
    entry; skipped while ``ledState`` is True (therapy in progress).
    """
    # Fix: dropped the unused `global lastData` declaration — lastData is
    # never referenced in this function.
    global ledState
    print('startAnalysis')
    # TTS prompt (Korean): "Starting skin analysis."
    tts.text_to_speech('피부 분석 시작')
    if not ledState:
        generate_analysis_history()
        # TTS prompt (Korean): "Skin analysis complete."
        tts.text_to_speech('피부 분석 완료')
コード例 #9
0
def turnOffLED():
    """Turn off the skin-therapy LED, broadcast the state, log the session.

    Side effects: speaks an end prompt via TTS, drives the blue LED GPIO
    low, publishes the state to remote listeners, clears the
    module-level ``ledState`` flag, and records a therapy history entry.
    """
    global ledState
    print('turnOffLED')
    # TTS prompt (Korean): "Ending skin improvement."
    tts.text_to_speech('피부 개선을 종료 합니다')
    led_blue.write_value(0)       # drive the blue LED GPIO low
    publish_led_state('false')    # remote side expects the string payload 'false'
    ledState = False
    generate_therapy_history()
コード例 #10
0
 def callback(recognizer, audio):
     """Background-listener callback that wakes Doc on a trigger word.

     Transcribes *audio* via Wit.ai and, if any known pronunciation of
     "Doc" occurs in the transcript, activates the assistant, greets the
     user, and stops the background listener. Recognition/network
     failures are reported and swallowed.
     """
     try:
         config.current_line = r.recognize_wit(audio, key=WIT_AI_KEY)
         print(config.current_line)
         # First known pronunciation found in the transcript, if any.
         heard = next(
             (p for p in doc_pronunciations if p in config.current_line),
             None,
         )
         if heard is not None:
             config.is_active = True
             print("I think you said: " + config.current_line)
             tts.text_to_speech("Doc here!")
             stop(stop_listening)
     except sr.UnknownValueError:
         print("Doc could not understand audio")
     except sr.RequestError as e:
         print("Could not request results from Doc; {0}".format(e))
コード例 #11
0
def synthesize_speech(message):
    """Reply with a voice message that speaks the user's text message.

    Synthesizes *message.text* to an audio file and sends it back to the
    same chat as a Telegram voice message.
    """

    user_name = define_user_name(message.chat)
    # Runtime log line ("speech synthesis request from ...") is
    # user-facing Russian text; kept byte-identical.
    print("Запрос на синтез речи от {}".format(user_name))

    file_name = text_to_speech(message.text)

    # Fix: close the audio file after sending — the original leaked the
    # file handle returned by open().
    with open(file_name, 'rb') as voice_file:
        bot.send_voice(message.chat.id, voice_file)
コード例 #12
0
    def delete_sound(self, sound_id):
        """Delete the sound *sound_id* belonging to this customer.

        Raises requests.HTTPError on a non-2xx API response.
        """
        endpoint = '{}/customers/{}/sounds/{}'.format(
            URL_PREFIX, self.customer_id, sound_id)
        response = requests.delete(endpoint, auth=self.auth)
        response.raise_for_status()

if __name__ == "__main__":
    # Smoke-test driver for SimwoodClient.
    # NOTE(review): placeholder credentials — real values must be supplied.
    auth = ('xxx', 'xxx')
    client = SimwoodClient(auth)

    # Recreate the "test" prompt from freshly synthesized audio.
    prompts = client.get_prompts()
    print(prompts)
    for prompt in prompts:
        if prompt['name'] == 'test':
            client.delete_sound(prompt['id'])
    import tts
    audio = tts.text_to_speech("This is a test")
    prompt_id = client.create_prompt("test", audio)
    print(prompt_id)

    # Recreate the "test" IVR endpoint pointing at that prompt.
    endpoints = client.get_endpoints()
    print(endpoints)
    for endpoint in endpoints:
        if endpoint['name'] == 'test':
            client.delete_endpoint(endpoint['id'])
    # Short code 27 is zero-padded to '027' per the API's format.
    endpoint_id = client.create_ivr_endpoint('{:03d}'.format(27), 'test', prompt_id)

    # Point the test phone number at the new endpoint.
    number = client.get_number("02081253476")
    print(number)
    client.update_destination(number, endpoint_id)

コード例 #13
0
def make_slideshow(image_path, date):
    """Build a narrated slideshow video from the images under *image_path*.

    Each image is overlaid on background.png, its text is extracted and
    synthesized to speech, and an image clip lasting as long as the
    narration (or 4 s when there is no text) is produced. The clips are
    concatenated and written to ./videos/<date>.mp4.

    Parameters:
        image_path: directory of source images (consumed by load_img_paths).
        date: basename for the output video file.

    Returns:
        Path of the written video file.
    """
    img_path_list = load_img_paths(image_path)

    video_background = cv2.imread("background.png")
    if video_background is None:
        print("Couldn't find background.png")
        exit()

    # Temp dir for per-image narration files — loop-invariant, create once
    # (hoisted out of the loop; the original re-checked per image).
    os.makedirs("./temp_audio", exist_ok=True)

    img_clips = []
    # Overlay all images onto the background.
    for i, img_path in enumerate(img_path_list):
        # Load and then resize/overlay the image onto the background.
        img = cv2.imread(img_path)
        if img is None:
            # Fix: the original only printed and then crashed using the
            # None image; skip unreadable images instead.
            print("Problem with {}".format(img_path))
            continue
        # Flip BGR -> RGB so colors don't look wrong in the video.
        img = img[:, :, ::-1]
        img = overlay_image(img, video_background)

        # Narrate the image's text (renamed from `tts` to avoid shadowing
        # the conventional tts module name).
        text = img_to_text(img)
        tts_path = os.path.join("temp_audio", "{}.mp3".format(i))
        has_narration = True
        try:
            text_to_speech(text, tts_path)
        except Exception as e:
            # Best-effort: only a "No text to speak" failure means silence;
            # other errors keep the original fall-through behavior.
            if "No text to speak" in str(e):
                has_narration = False

        # Image duration tracks the narration length; 4 s when silent.
        duration = 4
        if has_narration:
            tts_audio = AudioFileClip(tts_path)
            duration = tts_audio.duration
            tts_audio = tts_audio.set_duration(duration)

        clip = ImageClip(img, duration=duration)
        if has_narration:
            clip = clip.set_audio(tts_audio)
        else:
            # Silent stereo track so concatenation keeps a continuous
            # audio stream.
            make_frame = lambda t: 2 * [0]
            blank_audio = AudioClip.AudioClip(make_frame=make_frame,
                                              duration=duration)
            clip = clip.set_audio(blank_audio)
        print("Finished with {}".format(img_path))
        img_clips.append(clip)

    # background_audio = AudioFileClip("Catmosphere - Candy-Coloured Sky.mp3")
    concat_clip = concatenate_videoclips(img_clips)
    # audio = CompositeAudioClip([concat_clip.audio, background_audio])
    # Add background music
    # concat_clip = concat_clip.set_audio(audio.set_duration(concat_clip.duration))

    os.makedirs("./videos", exist_ok=True)
    path = "./videos/{}.mp4".format(date)
    concat_clip.write_videofile(path, fps=24)

    # Clean the temp_audio folder.
    print("Clearing temp_audio")
    for file_name in os.listdir("./temp_audio"):
        os.remove("./temp_audio/{}".format(file_name))
    return path
コード例 #14
0
import sr
import wiki
import tts
import stackoverflow as so
import webbrowser

titles = []
links = []

tts.text_to_speech(
    "hello Sir ,i can only search the data which is available in wikipedia and stackoverflow: "
)

# Main interactive loop: ask which backend to use, capture the query by
# voice, and read the results back.
while (True):
    tts.text_to_speech("sir which option you want to use: ")
    option = input("1: wikipedia \n2: stackoverflow\n")

    if (option == "2"):
        tts.text_to_speech("what you want to search : ")
        # sr.speech_output() appears to return the sentinel "failed" when
        # recognition did not succeed (see the comparison below).
        text = sr.speech_output()
        if text != "failed":
            print(text)
            tts.text_to_speech(
                "ok sir ! wait for few time , i am searching, " + str(text) +
                " on stack overflow")
            titles = so.get_overflow_titles(text)
            links = so.get_overflow_links(text)

            # Enumerate result titles for the user.
            # NOTE(review): count is never incremented in the visible code
            # — the snippet looks truncated here.
            count = 1
            for title in titles:
                print(str(count) + ":" + title)
コード例 #15
0
                        keyfile=None,
                        cert_reqs=ssl.CERT_REQUIRED,
                        tls_version=ssl.PROTOCOL_SSLv23,
                        ciphers=None)
    mqtt_client.username_pw_set(device_id, device_token)
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message
    mqtt_client.on_publish = on_publish
    mqtt_client.on_subscribe = on_subscribe

    mqtt_client.connect('api.artik.cloud', 8883, 60)
    # mqtt_client.loop_forever()
    mqtt_client.loop_start()


# Startup greeting (Korean): "Hello. Loading registered users."
tts.text_to_speech('안녕하세요. 등록된 사용자를 불러옵니다.')

# Yes/no confirmation push buttons, pin numbers taken from config.
btn_yes = gpio.GPIO(config['btn_yes_pinnum'], 'in')
btn_no = gpio.GPIO(config['btn_no_pinnum'], 'in')

# Fetch the users registered for this device via firebase_manager.
user_list = firebase_manager.get_user_lists(mask_uid)

if len(user_list) == 0:
    # TTS (Korean): "No registered users. Shutting down."
    tts.text_to_speech('등록된 사용자가 없습니다. 작동을 종료합니다.')

i = 0
while True:
    tts.text_to_speech(user_list[i][1])
    tts.text_to_speech('님 이신가요?')

    try:
コード例 #16
0
     renderer.render(heading) for heading in markdown.get_headings(block)
 ]
 if headings:
     name = headings[0]
 elif ii == 0:
     name = "Initial"
 else:
     print("Disconnected block (no heading) - skipping")
     continue
 links = markdown.get_links(block)
 actions = {}
 for link in links:
     key = list(filter(str.isdigit, renderer.render(link)))
     if key and link.dest.startswith('#'):
         actions[key[0]] = link.dest[1:]
 audio = tts.text_to_speech(text)
 sys.stdout.flush()
 prompt_id = client.create_prompt(prefix + name, audio)
 print("Created prompt {} ({})".format(prefix + name, prompt_id))
 sys.stdout.flush()
 endpoint_id = client.create_ivr_endpoint('{:03d}'.format(short_code),
                                          prefix + name, prompt_id)
 print("Created endpoint {} ({})".format(prefix + name, endpoint_id))
 sys.stdout.flush()
 short_code += 1
 endpoints.append((endpoint_id, actions))
 if ii == 0:
     initial_endpoint_id = endpoint_id
     initial_endpoint_name = prefix + name
 for heading in headings:
     endpoint_ids[heading.lower()] = endpoint_id