コード例 #1
0
def result():
    """Flask view: speak the submitted French message, translate it to
    English, speak the translation, and persist the submitted number.

    Reads ``message`` and ``number`` from the POSTed form, synthesises the
    French text to ``message_fr.wav``, calls the Azure Translator REST API
    (fr -> en), synthesises the translation to ``message_en.wav``, writes
    ``{"number": number}`` to ``limit.json``, and returns the original
    message.
    """
    message = request.form['message']
    number = request.form['number']

    # NOTE(review): subscription keys are hard-coded; move them to
    # environment variables / app config before shipping.
    speech_config = SpeechConfig(
        subscription="0a6a0817af9f46aea9054beaa3d30290", region="westeurope")
    audio_config = AudioOutputConfig(filename="message_fr.wav")
    speech_config.speech_synthesis_voice_name = "fr-FR-DeniseNeural"
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    synthesizer.speak_text_async(message)

    # Translator resource credentials (key + global endpoint).
    subscription_key = "e134037165514c648a57bf6ccc95e541"
    endpoint = "https://api.cognitive.microsofttranslator.com"

    # Add your location, also known as region. The default is global.
    # This is required if using a Cognitive Services resource.
    location = "francecentral"

    path = '/translate'
    # (The original assigned this twice; the duplicate was removed.)
    constructed_url = endpoint + path

    params = {'api-version': '3.0', 'from': 'fr', 'to': ['en']}

    headers = {
        'Ocp-Apim-Subscription-Key': subscription_key,
        'Ocp-Apim-Subscription-Region': location,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }

    # You can pass more than one object in body.
    body = [{'text': message}]

    quest = requests.post(constructed_url,
                          params=params,
                          headers=headers,
                          json=body)
    response = quest.json()

    # First translation of the first (and only) document.
    translator = response[0]["translations"][0]["text"]

    audio_config = AudioOutputConfig(filename="message_en.wav")
    speech_config.speech_synthesis_voice_name = "en-US-AriaNeural"
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    synthesizer.speak_text_async(translator)

    data = {"number": number}
    with open("limit.json", "w") as file:
        json.dump(data, file)

    return message
コード例 #2
0
ファイル: test.py プロジェクト: raindroid/Simon_Says
def voice_from_text(text, path):
    """Synthesise *text* to ``<path>/speech.wav`` using Azure TTS.

    Any existing ``speech.wav`` in *path* is removed first. Returns *path*
    (synthesis runs asynchronously, so the file may not be complete yet).
    """
    filepath = "{}/speech.wav".format(path)
    if os.path.isfile(filepath):
        os.remove(filepath)

    speech_config = speechsdk.SpeechConfig(
        subscription=api_keys["microsoft-speech"]["key"],
        region=api_keys["microsoft-speech"]["region"])
    audio_config = AudioOutputConfig(filename=filepath)
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    # BUG FIX: the original spoke a hard-coded sample sentence and ignored
    # the `text` argument entirely.
    synthesizer.speak_text_async(text)

    return path
コード例 #3
0
    def __voice_translate(self, phrase: str) -> str:
        """Synthesise *phrase* to a wav file via Azure SSML TTS.

        Returns the absolute path (as ``str``) of the generated wav file
        on success; on cancellation the error is logged and ``None`` is
        returned implicitly.
        """
        # Define the path to tmp/sound
        audio_path = self.OUTPUT_DIRECTORY / "sound"
        # Generate the wav name using a slugified version of the German word.
        wav_name = f"{slugify(phrase, separator='_')}.wav"
        # Join all the path ingredients together
        audio_file = audio_path / wav_name

        # BUG FIX: the original stored a Path object here despite the
        # comment (and the `-> str` annotation) promising a string;
        # convert immediately so the successful return value really is str.
        audio_path_str = str(audio_file.absolute())

        # AudioOutputConfig requires the parent directory to already exist.
        if not audio_path.exists():
            os.makedirs(audio_path)

        try:
            speech_config = speechsdk.SpeechConfig(
                subscription=self.CONFIG.AZURE_SPEECH_KEY,
                region=self.CONFIG.VOICE_SUBSCRIPTION_REGION)
            audio_config = AudioOutputConfig(filename=audio_path_str)
        except NameError:
            # Should only happen if the tmp folder vanished between the
            # existence check above and now.
            logging.critical(
                f"'{audio_path}' does not exist. Unable to create '{wav_name}'."
            )
            sys.exit(errno.ENOENT)
        except ValueError:
            logging.critical(
                "Subscription key must be given. Ensure 'AZURE_SPEECH_KEY' environment variable is set."
            )
            sys.exit(errno.EPERM)

        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=speech_config, audio_config=audio_config)

        # The SSML template file is formatted with the phrase to speak.
        ssml_string = self.CONFIG.AZURE_SSML_CONF.open().read().format(phrase)

        result = speech_synthesizer.speak_ssml_async(ssml_string).get()

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            logging.info(f"Speech synthesised for text {phrase}.")
            return audio_path_str
        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logging.error(
                f"Speech synthesis canceled: {cancellation_details.reason}.")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                if cancellation_details.error_details:
                    logging.error(
                        f"Error details: {cancellation_details.error_details}."
                    )
            logging.error("Did you update the subscription info?")
コード例 #4
0
ファイル: test.py プロジェクト: AnupMahat/Schabu
def welcome_message(name):
    """Synthesise and print a personalised interview welcome message.

    The audio is written asynchronously to ``welcome.wav``.
    """
    # NOTE(review): hard-coded subscription key and absolute Windows path;
    # both should come from configuration.
    speech_config = speechsdk.SpeechConfig(
        subscription="b58d19e457574aa39bc0f8b9b763cd55",
        region="australiaeast")
    audio_config = AudioOutputConfig(
        filename=
        "C:/Users/Pranav Patel/Documents/schabu/back_end/python/welcome.wav")
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    # Typo fixes in the spoken/printed text: "Schubu" -> "Schabu"
    # (project name), "Recrutiment" -> "Recruitment".
    text = "Hello " + name + "! Welcome to Schabu Recruitment Process. Please Click on the Start button to begin the interview process."
    synthesizer.speak_text_async(text)
    print(text)
コード例 #5
0
ファイル: start.py プロジェクト: Daenecompass/tts_for_discord
async def setup_azure(filename):
    """
    Returns an Azure Speech Synthesizer pointing to the given filename.

    When *filename* is None the synthesizer plays through the default
    speaker instead. If no explicit voice is configured, automatic source
    language detection is enabled.
    """
    auto_detect_source_language_config = None
    speech_config = SpeechConfig(subscription=setup['azure']['key'],
                                 region=setup['azure']['region'])
    if setup['azure']['voice'] == '' or setup['azure']['voice'] == 'default':
        auto_detect_source_language_config = AutoDetectSourceLanguageConfig(
            None, None)
    else:
        speech_config.speech_synthesis_voice_name = setup['azure']['voice']
    # `is None` rather than `== None` (PEP 8 identity comparison).
    if filename is None:
        audio_config = AudioOutputConfig(use_default_speaker=True)
    else:
        audio_config = AudioOutputConfig(filename=filename)
    synthesizer = SpeechSynthesizer(
        speech_config=speech_config,
        audio_config=audio_config,
        auto_detect_source_language_config=auto_detect_source_language_config)
    return synthesizer
コード例 #6
0
ファイル: Speech.py プロジェクト: Match-Yang/ClipSpeech
    def _do_tts(self, use_speaker: bool, ssml_config: str, output_file: str):
        """Render *ssml_config* with Azure TTS and save it to *output_file*.

        The synthesizer plays through the default speaker when
        *use_speaker* is true; the resulting audio is always written to
        *output_file* as a wav.
        """
        print("Start: ", output_file)
        config = SpeechConfig(subscription=self._subscription,
                              region=self._region)
        output = AudioOutputConfig(use_default_speaker=use_speaker)
        tts = SpeechSynthesizer(speech_config=config, audio_config=output)

        # Block until the SSML has been fully synthesised.
        synthesis_result = tts.speak_ssml_async(ssml_config).get()

        AudioDataStream(synthesis_result).save_to_wav_file(output_file)
        print("Finished", output_file)
コード例 #7
0
ファイル: tts.py プロジェクト: nowwater/LG_webos
def tts(item):
    """Synthesise *item* with Azure TTS and save the audio to disk.

    The synthesizer writes to ./result.wav via its audio config; the
    completed stream is additionally saved to ./result.mp3.
    """
    speech_config = SpeechConfig(
        subscription="bc0912f626b44d5a8bb00e4497644fa4", region="westus")
    audio_config = AudioOutputConfig(filename="./result.wav")

    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)

    # (Dead commented-out direction logic and the empty `appendString` it
    # fed were removed; item + "" is just item.)
    # .get() blocks until synthesis has finished.
    result = synthesizer.speak_text_async(item).get()
    stream = AudioDataStream(result)
    # NOTE(review): the stream holds wav data; the .mp3 extension is
    # misleading but kept for compatibility with existing consumers.
    stream.save_to_wav_file("./result.mp3")
コード例 #8
0
def speech(text, filename):
    """Synthesise Chinese *text* to *filename* using a fixed SSML template.

    Returns True on success (or an unrecognised result reason), False when
    synthesis was cancelled; cancellation details are printed.
    """
    # The original built an AudioStreamFormat here but never used it (and
    # shadowed the builtin `format` in the process) -- removed.
    audio_config = AudioOutputConfig(filename=filename)
    # NOTE(review): `speech_config` is not defined in this function; it is
    # expected to exist at module level -- confirm against the full file.
    speech_synthesizer = speechsdk.SpeechSynthesizer(
        speech_config=speech_config, audio_config=audio_config)
    ssml = '<speak xmlns="http://www.w3.org/2001/10/synthesis" xmlns:mstts="http://www.w3.org/2001/mstts" xmlns:emo="http://www.w3.org/2009/10/emotionml" version="1.0" xml:lang="zh-cn"><voice name="zh-cn-YunyangNeural"><prosody rate="-10%" pitch="0%">'
    ssml = ssml + text + '</prosody></voice></speak>'
    # Synchronous call: blocks until synthesis completes.
    result = speech_synthesizer.speak_ssml(ssml)
    if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
        print("Speech synthesized to speaker for text [{}]".format(text))
        return True
    elif result.reason == speechsdk.ResultReason.Canceled:
        cancellation_details = result.cancellation_details
        print("Speech synthesis canceled: {}".format(
            cancellation_details.reason))
        if cancellation_details.reason == speechsdk.CancellationReason.Error:
            if cancellation_details.error_details:
                print("Error details: {}".format(
                    cancellation_details.error_details))
        print("Did you update the subscription info?")
        return False
    return True
コード例 #9
0
# Text Analytics resource credentials.
# NOTE(review): keys are hard-coded -- move to environment variables.
key = "45af936cd5f54c8790ba15d2950766bc"
endpoint = "https://analisistextohack.cognitiveservices.azure.com/"

from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential

import pydot
import os

# Single-document buffer filled by leerfichero().
documents=[' ']

# Word-list placeholder; presumably filled elsewhere -- not visible here.
listapalabras=[' ']


# Speech resource used to synthesise output to file.wav.
speech_config = SpeechConfig(subscription="546da9a0b95d4b29a806c1c7d8d147bc", region="southcentralus")
audio_config = AudioOutputConfig(filename="file.wav")

# Flask app: accepts only .txt uploads up to 2 MiB into ./uploads.
app = Flask(__name__,template_folder="templates")
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
app.config['UPLOAD_EXTENSIONS'] = ['.txt']
app.config['UPLOAD_PATH'] = 'uploads'


def leerfichero():
    """Read uploads/archivo.txt into the global ``documents[0]``.

    The trailing newline is stripped. ("leerfichero" = "read file".)
    """
    global documents
    # Context manager ensures the handle is closed (the original leaked it).
    with open("uploads/archivo.txt", "r") as archivo:
        documents[0] = archivo.read()
    documents[0] = documents[0].rstrip('\n')

def authenticate_client():
    ta_credential = AzureKeyCredential(key)
コード例 #10
0
# Reachy robot with both arms over websocket IO and force-gripper hands.
robot = Reachy(
    right_arm=parts.RightArm(io='ws', hand='force_gripper'),
    left_arm=parts.LeftArm(io='ws', hand='force_gripper'),
)

# Local offline TTS engine (in addition to the Azure synthesizer below).
engine = pyttsx3.init()

# Drive the left-arm joints to their zero positions.
robot.left_arm.shoulder_roll.goal_position = 0
robot.left_arm.arm_yaw.goal_position = 0
robot.left_arm.elbow_pitch.goal_position = 0
robot.left_arm.hand.forearm_yaw.goal_position = 0

# Azure TTS playing through the default speaker.
# NOTE(review): "subscriptionkey" is a placeholder -- supply a real key.
speech_config = SpeechConfig(subscription="subscriptionkey",
                             region="westeurope")
audio_config = AudioOutputConfig(use_default_speaker=True)
synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                audio_config=audio_config)

# Google Assistant gRPC endpoint and protobuf constants.
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
END_OF_UTTERANCE = embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.DialogStateOut.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.DialogStateOut.CLOSE_MICROPHONE
PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5  # 185 seconds

# `global` at module level is a no-op; these lines only document the
# names that functions elsewhere declare as shared state.
global spokenAnswer
global followUp
global followUpSentence

spokenAnswer = ""
コード例 #11
0
        response = requests.get(search_url, headers=headers, params=params)
        response.raise_for_status()
        search_results = response.json()
        i2download = [img["contentUrl"]
                      for img in search_results["value"]][randrange(20)]
        print(i2download)
        response = requests.get(i2download)
        ext = i2download[-3:]
        imgfile = open(
            "results/{}/{:04d}/{:04d}.{}".format(uuid, snumb, wi, ext), "wb")
        imgfile.write(response.content)
        imgfile.close()
        print("Got {}".format(wi))
        time.sleep(2)

    audio_config = AudioOutputConfig(
        filename="results/{}/{:04d}/wav.wav".format(uuid, snumb))
    synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                    audio_config=audio_config)
    synthesizer.speak_text_async(sentence)

    os.system(
        "python collage_maker.py -o results/{0}/slide-{1:04d}.png -f results/{0}/{1:04d} -w 800 -i 600"
        .format(uuid, snumb))

    print(
        'ffmpeg -loop 1 -i results/{0}/slide-{1:04d}.png -i results/{0}/{1:04d}/wav.wav -c:v libx264 -tune stillimage -c:a aac -b:a 192k -pix_fmt yuv420p -shortest results/{0}/{1:04d}.mp4 -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2"'
        .format(uuid, snumb))

    pngs = [
        x for x in os.listdir("results/{}".format(uuid)) if x[-3:] == "png"
    ]
コード例 #12
0
ファイル: TTSmain.py プロジェクト: jchang274/TTS
# TTS driver: wrap InputText.txt in SSML (Top/Bottom templates) and
# synthesise the result to an mp3 in the Cache directory.
from azure.cognitiveservices.speech import AudioDataStream
from azure.cognitiveservices.speech import SpeechConfig
from azure.cognitiveservices.speech import SpeechSynthesizer
from azure.cognitiveservices.speech import SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig
import os
import time
time_now = time.strftime("%Y.%m.%d.%H%M%S", time.localtime())
# NOTE(review): subscription key is hard-coded; prefer an env variable.
KEY = 'dece00af114f42a8b6c7324dca4d4125'
REGION = "southeastasia"
speech_config = SpeechConfig(subscription=KEY, region=REGION)
mp3_format = 'Audio16Khz32KBitRateMonoMp3'
speech_config.set_speech_synthesis_output_format(
    SpeechSynthesisOutputFormat[mp3_format])
audio_config = AudioOutputConfig(filename=r'Cache\ConvertCache.mp3')
synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                audio_config=audio_config)

# Assemble SSML: Top.txt + InputText.txt + Bottom.txt. Raw strings keep
# the Windows backslash paths unambiguous; the original's bare
# open(...).read() calls leaked every file handle.
with open(r"Cache\Top.txt", "r") as f:
    data1 = f.read()
with open("InputText.txt", "r") as f:
    data2 = f.read()
with open(r"Cache\Bottom.txt", "r") as f:
    data3 = f.read()
data1 += "\n"
data1 += data2
data1 += "\n"
data1 += data3
with open(r"Cache\InputText.xml", "w") as f:
    f.write(data1)

with open(r"Cache\InputText.xml", "r") as f:
    ssml_string = f.read()
synthesizer.speak_ssml_async(ssml_string)
filename1 = r'Cache\ConvertCache.mp3'
コード例 #13
0
# Set up a speech synthesizer using the default speaker as audio output.
#
# https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
#
# -----------------------------------------------------------------------

speech_conf = speechsdk.SpeechConfig(subscription=key, region=location)

# Optional CLI overrides for synthesis language and voice.
if args.lang:
    speech_conf.speech_synthesis_language = args.lang
if args.voice:
    speech_conf.speech_synthesis_voice_name = args.voice

# With --output, write to a file (resolved against the command's cwd);
# otherwise fall back to the default speaker.
if args.output:
    file_location = os.path.join(get_cmd_cwd(), args.output)
    audio_conf = AudioOutputConfig(filename=file_location)
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_conf,
                                                     audio_config=audio_conf)
else:
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_conf)

# ----------------------------------------------------------------------
# Synthesize the text to speech. When the following line is run expect
# to hear the synthesized speech.
# ----------------------------------------------------------------------

if len(text):
    for sentence in text:
        result = speech_synthesizer.speak_text_async(sentence).get()
        if str(result.reason) == "ResultReason.Canceled":
            print(
コード例 #14
0
# Synthesise a fixed greeting to a file with the GuyNeural voice.
import azure.cognitiveservices.speech as speechsdk
from azure.cognitiveservices.speech import AudioDataStream, SpeechConfig, SpeechSynthesizer, SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig

# NOTE(review): replace with your real key/region before running.
speech_key, service_region = "UseYourSpeechAPI", "eastus"

speech_config = speechsdk.SpeechConfig(subscription=speech_key,
                                       region=service_region)

voice = "Microsoft Server Speech Text to Speech Voice (en-US, GuyNeural)"  #en-US-GuyRUS
speech_config.speech_synthesis_voice_name = voice
speech_config.set_speech_synthesis_output_format(
    SpeechSynthesisOutputFormat["Riff24Khz16BitMonoPcm"])

# NOTE(review): Riff*Pcm is WAV data, so the .mp3 extension is misleading;
# use an *Mp3 output format (or a .wav name) for a genuine mp3 file.
audio_config = AudioOutputConfig(filename="c:/OutputVoiceFile.mp3")

synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                audio_config=audio_config)
# .get() blocks until synthesis finishes so the script does not exit with
# a partially written file (the original never waited on the future).
synthesizer.speak_text_async(
    "Hello World, This is a test of creating a playable mp3 file").get()
コード例 #15
0
# In[ ]:

# In[ ]:

# CLI: synthesise a previously generated summary text to a wav file.
ap = argparse.ArgumentParser()
ap.add_argument("-a",
                "--audio_name",
                required=True,
                help="Please Enter Audio File Name(Without extension)")
args = vars(ap.parse_args())

# In[2]:

# NOTE(review): "__KEY"/"Region" are placeholders -- supply real values.
speech_config = SpeechConfig(subscription="__KEY", region="Region")

# In[3]:

audio_config = AudioOutputConfig(filename="public/python/output_audio_files/" +
                                 args['audio_name'] + "_summary.wav")

# In[4]:

# Read the summary and collapse it onto a single line.
with open("summary/" + args['audio_name'] + "_summary.txt", 'r') as file:
    data = file.read().replace('\n', '')

# In[11]:

synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                audio_config=audio_config)
# Wait for synthesis to finish before the script exits (the original fired
# the async call and fell off the end; the bare `data` expression -- a
# notebook-export artifact with no effect -- was removed).
synthesizer.speak_text_async(data).get()
コード例 #16
0
# Interactive TTS demo: speak through the default speaker or save to a
# wav file, depending on the user's menu choice.
load_dotenv()

speech_key, service_region = os.getenv('SPEECH_RESOURCE_KEY'), "westus"

# A speech Synthesizer is created with the given settings.
speech_config = SpeechConfig(subscription=speech_key, region=service_region)

print("Enter your choice :")
print("1. Output from speaker")
print("2. Save output to a file\n")
choice = int(input())

if choice == 1:
    # Output is received via the device speaker.
    audio_config = AudioOutputConfig(use_default_speaker=True)
elif choice == 2:
    # Output is saved in the file whose name is provided as an input.
    audio_config = AudioOutputConfig(
        filename=("tts_output/" +
                  input("Enter the name of the output file : ") + ".wav"))
else:
    # The original fell through here leaving `audio_config` undefined and
    # crashed later with a NameError; fail fast with a clear message.
    raise ValueError("Invalid choice: expected 1 or 2, got {}".format(choice))

# A speech Synthesizer is initialized with given settings

synthesizer = SpeechSynthesizer(speech_config=speech_config,
                                audio_config=audio_config)

# An asynchronous call to the api is made with the input waiting for the output