def result():
    """Handle a form submission end to end.

    Reads ``message`` and ``number`` from the posted form, synthesizes the
    French message to ``message_fr.wav``, translates it to English via the
    Azure Translator REST API, synthesizes the translation to
    ``message_en.wav``, and persists the number to ``limit.json``.

    Returns:
        The original (French) message string.
    """
    message = request.form['message']
    number = request.form['number']

    # NOTE(review): subscription keys are hard-coded secrets — they should
    # be moved to environment variables or a config file.
    speech_config = SpeechConfig(
        subscription="0a6a0817af9f46aea9054beaa3d30290",
        region="westeurope")
    audio_config = AudioOutputConfig(filename="message_fr.wav")
    speech_config.speech_synthesis_voice_name = "fr-FR-DeniseNeural"
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    # Wait for synthesis to finish so message_fr.wav is fully written before
    # we continue (the original fired the async call and never joined it).
    synthesizer.speak_text_async(message).get()

    # Translator resource configuration.
    subscription_key = "e134037165514c648a57bf6ccc95e541"
    endpoint = "https://api.cognitive.microsofttranslator.com"
    # Location, also known as region. Required when using a Cognitive
    # Services resource.
    location = "francecentral"

    path = '/translate'
    # (The original assigned constructed_url twice; once is enough.)
    constructed_url = endpoint + path

    params = {'api-version': '3.0', 'from': 'fr', 'to': ['en']}
    headers = {
        'Ocp-Apim-Subscription-Key': subscription_key,
        'Ocp-Apim-Subscription-Region': location,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }
    # You can pass more than one object in body.
    body = [{'text': message}]
    quest = requests.post(constructed_url, params=params, headers=headers,
                          json=body)
    response = quest.json()
    translator = response[0]["translations"][0]["text"]

    # Synthesize the English translation to its own file, reusing the same
    # speech config with a different voice and output.
    audio_config = AudioOutputConfig(filename="message_en.wav")
    speech_config.speech_synthesis_voice_name = "en-US-AriaNeural"
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    synthesizer.speak_text_async(translator).get()

    # Persist the submitted number for later use.
    data = {"number": number}
    with open("limit.json", "w") as file:
        json.dump(data, file)

    return message
async def setup_azure(filename):
    """Return an Azure SpeechSynthesizer writing to *filename*.

    Args:
        filename: Output WAV path, or None to play through the default
            speaker.

    Voice selection comes from the module-level ``setup`` config. When the
    configured voice is empty or 'default', automatic source-language
    detection is enabled instead of a fixed voice.
    """
    auto_detect_source_language_config = None
    speech_config = SpeechConfig(subscription=setup['azure']['key'],
                                 region=setup['azure']['region'])
    # Empty / 'default' voice -> let the service auto-detect the language.
    if setup['azure']['voice'] in ('', 'default'):
        auto_detect_source_language_config = AutoDetectSourceLanguageConfig(
            None, None)
    else:
        speech_config.speech_synthesis_voice_name = setup['azure']['voice']
    # PEP 8: compare to None with identity (`is`), not equality (`==`).
    if filename is None:
        audio_config = AudioOutputConfig(use_default_speaker=True)
    else:
        audio_config = AudioOutputConfig(filename=filename)
    synthesizer = SpeechSynthesizer(
        speech_config=speech_config,
        audio_config=audio_config,
        auto_detect_source_language_config=auto_detect_source_language_config)
    return synthesizer
#!/usr/bin/python3
from azure.cognitiveservices.speech import AudioDataStream, SpeechConfig, SpeechSynthesizer, SpeechSynthesisOutputFormat
import azure.cognitiveservices.speech as speechsdk
from azure.cognitiveservices.speech.audio import AudioOutputConfig

# Voice and sample text to synthesize.
voice = "zh-CN-XiaoxiaoNeural"
text = '你好'

# Configure the speech service and direct output to a WAV file.
cfg = SpeechConfig(subscription="3cb77646eea84168b348969306ff2a3c",
                   region="eastus")
cfg.speech_synthesis_voice_name = voice
out_cfg = AudioOutputConfig(filename="file.wav")
synth = SpeechSynthesizer(speech_config=cfg, audio_config=out_cfg)

# Kick off synthesis and block until it completes.
result = synth.speak_text_async(text).get()

# Report success or the cancellation reason (including error details).
if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
    print("Speech synthesized to speaker for text [{}] with voice [{}]".format(
        text, voice))
elif result.reason == speechsdk.ResultReason.Canceled:
    details = result.cancellation_details
    print("Speech synthesis canceled: {}".format(details.reason))
    if details.reason == speechsdk.CancellationReason.Error:
        print("Error details: {}".format(details.error_details))