# Kakao TTS platform fragment: constants, config schema, and engine setup.
_SUPPORT_VOLUME = ["soft", "medium", "loud"]
KAKAO_API_URL = 'https://kakaoi-newtone-openapi.kakao.com/v1/synthesize'
CONF_KAKAO_API_KEY = "api_key"
CONF_VOICE = "voice"
DEFAULT_VOICE = "WOMAN_READ_CALM"
CONF_VOLUME = "volume"
DEFAULT_VOLUME = "medium"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_KAKAO_API_KEY): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
    # NOTE(review): volume accepts any string here even though
    # _SUPPORT_VOLUME exists — consider vol.In(_SUPPORT_VOLUME).
    vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): cv.string,
})


async def async_get_engine(hass, config, discovery_info=None):
    """Set up kakao tts speech component."""
    return KakaoSpeechManager(hass, config)


class KakaoSpeechManager(Provider):
    """The Kakao TTS speech API provider."""

    def __init__(self, hass, config):
        """Init Kakao TTS service."""
        self._hass = hass
        # (class continues beyond this chunk)
# Microsoft TTS platform fragment: defaults, config schema, engine setup.
DEFAULT_RATE = 0
DEFAULT_VOLUME = 0
DEFAULT_PITCH = "default"
DEFAULT_CONTOUR = ""
DEFAULT_REGION = "eastus"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
    vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
    # Rate and volume are relative adjustments in percent (-100..100).
    vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
    vol.Optional(CONF_CONTOUR, default=DEFAULT_CONTOUR): cv.string,
    vol.Optional(CONF_REGION, default=DEFAULT_REGION): cv.string,
})


def get_engine(hass, config, discovery_info=None):
    """Set up Microsoft speech component."""
    return MicrosoftProvider(
        config[CONF_API_KEY],
        # (argument list continues beyond this chunk)
# Baidu TTS platform fragment (legacy style): config keys, endpoints, schema.
CONF_APIKEY = 'api_key'
CONF_SECRETKEY = 'secret_key'
CONF_SPEED = 'speed'
CONF_PITCH = 'pitch'
CONF_VOLUME = 'volume'
PERSON = 'person'

# OAuth token endpoint and synthesis endpoint of the Baidu speech service.
TOKEN_INTERFACE = 'https://openapi.baidu.com/oauth/2.0/token'
TEXT2AUDIO_INTERFACE = 'http://tsn.baidu.com/text2audio'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_APIKEY): cv.string,
    vol.Optional(CONF_SECRETKEY): cv.string,
    vol.Optional(CONF_SPEED, default='5'): cv.string,
    vol.Optional(CONF_PITCH, default='5'): cv.string,
    vol.Optional(CONF_VOLUME, default='5'): cv.string,
    vol.Optional(PERSON, default='0'): cv.string,
})


def get_engine(hass, config):
    """Set up the Baidu TTS engine from configuration."""
    lang = config.get(CONF_LANG)
    apiKey = config.get(CONF_APIKEY)  # NOTE(review): camelCase; prefer snake_case
    secretKey = config.get(CONF_SECRETKEY)
    speed = config.get(CONF_SPEED)
    pitch = config.get(CONF_PITCH)
    volume = config.get(CONF_VOLUME)
    person = config.get(PERSON)
    # NOTE(review): prefer `apiKey is None` over `== None` (PEP 8).
    if apiKey == None:
        # (branch body continues beyond this chunk)
# Google Cloud TTS fragment: option validators and the platform schema.
# (fragment: tail of PITCH_SCHEMA; its opening is outside this chunk)
    vol.Clamp(min=MIN_PITCH, max=MAX_PITCH))

GAIN_SCHEMA = vol.All(vol.Coerce(float), vol.Clamp(min=MIN_GAIN, max=MAX_GAIN))
PROFILES_SCHEMA = vol.All(cv.ensure_list, [vol.In(SUPPORTED_PROFILES)])
TEXT_TYPE_SCHEMA = vol.All(vol.Lower, vol.In(SUPPORTED_TEXT_TYPES))

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_KEY_FILE): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): GENDER_SCHEMA,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): VOICE_SCHEMA,
    vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): SCHEMA_ENCODING,
    vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): SPEED_SCHEMA,
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): PITCH_SCHEMA,
    vol.Optional(CONF_GAIN, default=DEFAULT_GAIN): GAIN_SCHEMA,
    vol.Optional(CONF_PROFILES, default=[]): PROFILES_SCHEMA,
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE): TEXT_TYPE_SCHEMA,
})


async def async_get_engine(hass, config, discovery_info=None):
    """Set up Google Cloud TTS component."""
    key_file = config.get(CONF_KEY_FILE)
    # (function body continues beyond this chunk)
# Remote Pico TTS platform: imports, constants, schema, and provider setup.
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from urllib.parse import quote

_LOGGER = logging.getLogger(__name__)

SUPPORT_LANGUAGES = ["en-US", "en-GB", "de-DE", "es-ES", "fr-FR", "it-IT"]
DEFAULT_LANG = "en-US"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 59126

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})


def get_engine(hass, config):
    """Set up Pico speech component."""
    return PicoProvider(hass, config[CONF_LANG], config[CONF_HOST],
                        config[CONF_PORT])


class PicoProvider(Provider):
    """The Pico TTS API provider."""

    def __init__(self, hass, lang, host, port):
        """Initialize Pico TTS provider."""
        # (method body continues beyond this chunk)
# Yandex SpeechKit TTS platform fragment: config keys, defaults, schema.
CONF_CODEC = 'codec'
CONF_VOICE = 'voice'
CONF_EMOTION = 'emotion'
CONF_SPEED = 'speed'

DEFAULT_LANG = 'en-US'
DEFAULT_CODEC = 'mp3'
DEFAULT_VOICE = 'zahar'
DEFAULT_EMOTION = 'neutral'
DEFAULT_SPEED = 1

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES),
    vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): vol.In(SUPPORTED_EMOTION),
    vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): vol.Range(min=MIN_SPEED, max=MAX_SPEED)
})


# Fix: the deprecated @asyncio.coroutine decorator (removed in Python 3.11)
# is replaced by a native `async def`, matching the other async platforms in
# this file. The copy-pasted "VoiceRSS" docstrings are corrected — this is
# the Yandex SpeechKit platform.
async def async_get_engine(hass, config):
    """Set up Yandex SpeechKit TTS component."""
    return YandexSpeechKitProvider(hass, config)


class YandexSpeechKitProvider(Provider):
    """Yandex SpeechKit TTS API provider."""
# "Google"-branded TTS platform backed by the api.jiluxinqing.com endpoint.
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.helpers.aiohttp_client import async_get_clientsession

_LOGGER = logging.getLogger(__name__)

# NOTE(review): despite the name, this URL is a third-party service, not Google.
GOOGLE_SPEECH_URL = "https://api.jiluxinqing.com/api/service/tts?text="
CONF_BEFORE_MESSAGE = "before_message"
CONF_AFTER_MESSAGE = "after_message"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_BEFORE_MESSAGE, default=""): cv.string,
    vol.Optional(CONF_AFTER_MESSAGE, default=""): cv.string
})


async def async_get_engine(hass, config):
    """Set up Google speech component."""
    return GoogleProvider(hass, config)


class GoogleProvider(Provider):
    """The Google speech API provider."""

    def __init__(self, hass, config):
        """Init Google TTS service."""
        self.hass = hass
        self._before_message = config[CONF_BEFORE_MESSAGE]
        # (method body continues beyond this chunk)
'natia', # Georgian 'azamat', 'nazgul', # Kyrgyz 'talgat', # Tatar 'anatol' # Ukrainian ] CONF_VOICE = 'voice' CONF_API_URL = 'url' DEFAULT_VOICE = 'anna' DEFAULT_LANG = 'ru-RU' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_URL): cv.string, vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES) }) SUPPORT_LANGUAGES = ['ru-RU'] SUPPORTED_OPTIONS = [CONF_VOICE] @asyncio.coroutine def async_get_engine(hass, config): """Set up RHVoice speech component.""" return RHVoiceProvider(hass, config) class RHVoiceProvider(Provider):
# MozillaTTS platform: constants, schema, and provider setup.
_LOGGER = logging.getLogger(__name__)

CONF_VOICE = "voice"
CONF_CODEC = "codec"
SUPPORT_LANGUAGES = ["en_US"]
SUPPORT_CODEC = ["WAVE_FILE"]
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 5002
DEFAULT_LANG = "en_US"
DEFAULT_CODEC = "WAVE_FILE"

# NOTE(review): CONF_VOICE/CONF_CODEC are defined above but never added to
# the schema — confirm whether they were meant to be configurable.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})


def get_engine(hass, config, discovery_info=None):
    """Set up MozillaTTS speech component."""
    return MozillaTTSProvider(hass, config)


class MozillaTTSProvider(Provider):
    """The MozillaTTS speech API provider."""

    def __init__(self, hass, conf):
        """Init MozillaTTS service."""
        self.hass = hass
        self.name = "MozillaTTS"
        self._host = conf.get(CONF_HOST)
        self._port = conf.get(CONF_PORT)
# RHVoice platform fragment: option validators, config schema, and setup.
# Pitch, rate and volume are integer percentages clamped to 0..100; the voice
# must be one of the names in the flattened per-language voice table.
PITCH_SCHEMA = vol.All(vol.Coerce(int), vol.Range(0, 100))
RATE_SCHEMA = vol.All(vol.Coerce(int), vol.Range(0, 100))
VOLUME_SCHEMA = vol.All(vol.Coerce(int), vol.Range(0, 100))
VOICE_SCHEMA = vol.All(
    cv.string,
    vol.In(list(chain(*SUPPORTED_LANGUAGES.values()))),
)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_FORMAT, default=DEFAULT_FORMAT): FORMAT_SCHEMA,
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): PITCH_SCHEMA,
    vol.Optional(CONF_RATE, default=DEFAULT_RATE): RATE_SCHEMA,
    vol.Optional(CONF_SSL, default=False): cv.boolean,
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): VOICE_SCHEMA,
    vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): VOLUME_SCHEMA,
})


async def async_get_engine(hass, config, discovery_info=None):
    """Set up RHVoice speech component."""
    return RHVoiceProvider(hass, config)
# Homely TTS platform fragment.
SUPPORT_LANGUAGES = [
    'af', 'sq', 'ar', 'hy', 'bn', 'ca', 'zh', 'zh-cn', 'zh-tw', 'zh-yue',
    'hr', 'cs', 'da', 'nl', 'en', 'en-au', 'en-uk', 'en-us', 'eo', 'fi',
    'fr', 'de', 'el', 'hi', 'hu', 'is', 'id', 'it', 'ja', 'ko', 'la', 'lv',
    'mk', 'no', 'pl', 'pt', 'pt-br', 'ro', 'ru', 'sr', 'sk', 'es', 'es-es',
    'es-mx', 'es-us', 'sw', 'sv', 'ta', 'th', 'tr', 'vi', 'cy', 'uk', 'bg-BG'
]
DEFAULT_LANG = 'en'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    # NOTE(review): vol.Optional's second positional argument is `default`;
    # passing ATTR_CREDENTIALS here makes it the default value, which looks
    # unintended — probably meant plain vol.Optional(CONF_API_KEY): cv.string.
    vol.Optional(CONF_API_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_SPEED, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE_TYPE, ATTR_CREDENTIALS): cv.string,
})


async def async_get_engine(hass, config):
    """Set up Homely speech component."""
    return HomelyProvider(hass, config)


class HomelyProvider(Provider):
    """The Homely speech API provider."""

    def __init__(self, hass, config):
        """Init Homely TTS service."""
        # (method body continues beyond this chunk)
# Google Cloud TTS (service-account file) platform fragment.
DEFAULT_ENCODING = 'MP3'
DEFAULT_VOICE_NAME = 'en-US-Wavenet-E'
DEFAULT_SPEAKING_RATE = 1.0
DEFAULT_PITCH = 0.0
DEFAULT_INPUT = 'text'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_VOICE_NAME, default=DEFAULT_VOICE_NAME): vol.In(SUPPORTED_VOICE_NAMES),
    # NOTE(review): the Cloud TTS API limits speaking_rate to 0.25..4.0; the
    # -20..20 range here looks copied from the pitch validator — confirm.
    vol.Optional(CONF_SPEAKING_RATE, default=DEFAULT_SPEAKING_RATE): vol.All(vol.Coerce(float), vol.Range(-20.0, 20.0)),
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): vol.All(vol.Coerce(float), vol.Range(-20.0, 20.0)),
    vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): vol.In(SUPPORTED_ENCODINGS),
    vol.Optional(CONF_INPUT, default=DEFAULT_INPUT): vol.In(SUPPORTED_INPUTS),
})


def get_engine(hass, config):
    """Set up the Google Cloud TTS engine."""
    # pylint: disable=import-error
    from google.oauth2 import service_account
    # Credentials are read from a fixed file in the HA config directory.
    credentials = service_account.Credentials.from_service_account_file(
        hass.config.path('google-cloud.json'))
    from google.cloud import texttospeech
    # (function body continues beyond this chunk)
# Yandex SpeechKit platform fragment: configuration keys, defaults, schema.
CONF_EMOTION = 'emotion'
CONF_SPEED = 'speed'

DEFAULT_LANG = 'en-US'
DEFAULT_CODEC = 'mp3'
DEFAULT_VOICE = 'zahar'
DEFAULT_EMOTION = 'neutral'
DEFAULT_SPEED = 1

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES),
    vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): vol.In(SUPPORTED_EMOTION),
    vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): vol.Range(min=MIN_SPEED, max=MAX_SPEED),
})

# Options that may be overridden per synthesis call.
SUPPORTED_OPTIONS = [CONF_CODEC, CONF_VOICE, CONF_EMOTION, CONF_SPEED]
# Reverso TTS platform fragment.
# (fragment: tail of the supported-voices list; its opening is outside this chunk)
    'Lulu-Mandarin-Chinese',
    'Bente-Norwegian', 'Kari-Norwegian', 'Olav-Norwegian',
    'Ania-Polish', 'Monika-Polish',
    'Celia-Portuguese', 'ro-RO-Andrei-Romanian', 'Alyona-Russian',
    'Mia-Scanian', 'Antonio-Spanish', 'Ines-Spanish', 'Maria-Spanish',
    'Elin-Swedish', 'Emil-Swedish', 'Emma-Swedish', 'Erik-Swedish',
    'Ipek-Turkish', 'Heather-US-English', 'Karen-US-English',
    'Kenny-US-English', 'Laura-US-English', 'Micah-US-English',
    'Nelly-US-English', 'Rod-US-English', 'Ryan-US-English',
    'Saul-US-English', 'Sharon-US-English', 'Tracy-US-English',
    'Will-US-English', 'Rodrigo-US-Spanish', 'Rosa-US-Spanish'
]

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
    vol.Optional(CONF_BITRATE, default=DEFAULT_BITRATE): cv.string,
})


def get_engine(hass, config, discovery_info=None):
    """Set up Reverso speech component."""
    return ReversoProvider(
        config[CONF_LANG],
        config[CONF_PITCH],
        config[CONF_BITRATE],
    )


class ReversoProvider(Provider):
    """Reverso speech API provider."""
# IBM Watson TTS platform fragment.
# (fragment: tail of the content-type -> file-extension map; its opening is
# outside this chunk)
    "audio/mp3": "mp3",
    "audio/mpeg": "mp3",
    "audio/ogg": "ogg",
    "audio/ogg;codecs=opus": "ogg",
    "audio/ogg;codecs=vorbis": "ogg",
    "audio/wav": "wav",
}

DEFAULT_VOICE = "en-US_AllisonV3Voice"
DEFAULT_OUTPUT_FORMAT = "audio/mp3"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_URL, default=DEFAULT_URL): cv.string,
    vol.Required(CONF_APIKEY): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT): vol.In(SUPPORTED_OUTPUT_FORMATS),
})


def get_engine(hass, config, discovery_info=None):
    """Set up IBM Watson TTS component."""
    authenticator = IAMAuthenticator(config[CONF_APIKEY])
    service = TextToSpeechV1(authenticator)
    service.set_service_url(config[CONF_URL])

    # Voice ids are prefixed with their five-character locale, e.g. "en-US".
    supported_languages = list({s[:5] for s in SUPPORTED_VOICES})
    default_voice = config[CONF_VOICE]
    # (function body continues beyond this chunk)
# Naver (CLOVA) premium TTS platform fragment.
# (fragment: tail of PITCH_SCHEMA; its opening is outside this chunk)
    max=MAX_PITCH))

CONF_EMOTION = "emotion"
DEFAULT_EMOTION = 0
MIN_EMOTION = 0
MAX_EMOTION = 2
EMOTION_SCHEMA = vol.All(vol.Coerce(int),
                         vol.Clamp(min=MIN_EMOTION, max=MAX_EMOTION))

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CLIENT_ID): cv.string,
    vol.Required(CONF_CLIENT_SECRET): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES),
    vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): SPEED_SCHEMA,
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): PITCH_SCHEMA,
    vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): EMOTION_SCHEMA,
})


def get_engine(hass, config, discovery_info=None):
    """Set up naver_tts_premium speech component."""
    # NOTE(review): class name Naver_Premium is not PEP 8 CamelCase, but
    # renaming would break any external references.
    return Naver_Premium(hass, config)


class Naver_Premium(Provider):
    """The Naver_TTS_Premium speech API provider."""
# Baidu TTS platform fragment: config keys and validated schema.
CONF_APP_ID = 'app_id'
CONF_SECRET_KEY = 'secret_key'
CONF_SPEED = 'speed'
CONF_PITCH = 'pitch'
CONF_VOLUME = 'volume'
CONF_PERSON = 'person'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Required(CONF_APP_ID): cv.string,
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_SECRET_KEY): cv.string,
    # Accepted ranges: speed/pitch 0-9, volume 0-15, person (voice id) 0-4.
    vol.Optional(CONF_SPEED, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=9)
    ),
    vol.Optional(CONF_PITCH, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=9)
    ),
    vol.Optional(CONF_VOLUME, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=15)
    ),
    vol.Optional(CONF_PERSON, default=0): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=4)
    ),
})

# Keys are options in the config file, and Values are options
# required by Baidu TTS API.
_OPTIONS = {
    CONF_PERSON: 'per',
    CONF_PITCH: 'pit',
    CONF_SPEED: 'spd',
    # (mapping continues beyond this chunk)
# TTS platform fragment (service not identifiable from this chunk).
# (fragment: tail of SUPPORTED_LANGUAGES; its opening is outside this chunk)
    "SPANISH_ES", "CHINESE_HK", "CHINESE_TW", "CHINESE_CN", "SWEDISH",
    "HUNGARIAN", "DUTCH_BE", "ARABIC_SA", "KOREAN", "CZECH", "DANISH",
    "HINDI", "GREEK", "JAPANESE"
]

DEFAULT_LANG = "ENGLISH_US"
CONF_SPEED = "speed"
CONF_PITCH = "pitch"
CONF_VOLUME = "volume"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    # Speed and pitch default to the midpoint of 0..1; volume defaults to max.
    vol.Optional(CONF_SPEED, default=0.5): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
    vol.Optional(CONF_PITCH, default=0.5): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
    vol.Optional(CONF_VOLUME, default=1): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
})

# Keys are options in the config file, and Values are options
# required by Baidu TTS API.
# NOTE(review): the comment above says "Baidu", but the language constants do
# not look like Baidu's API — likely copied from another platform; confirm.
_OPTIONS = {
    CONF_PITCH: "pit",
    CONF_SPEED: "spd",
    CONF_VOLUME: "vol",
}

SUPPORTED_OPTIONS = [CONF_PITCH, CONF_SPEED, CONF_VOLUME]
# MaryTTS platform fragment.
# (fragment: tail of the SUPPORT_CODEC list; its opening is outside this chunk)
    'aiff', 'au', 'wav'
]

CONF_VOICE = 'voice'
CONF_CODEC = 'codec'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 59125
DEFAULT_LANG = 'en-US'
DEFAULT_VOICE = 'cmu-slt-hsmm'
DEFAULT_CODEC = 'wav'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
    vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODEC)
})


async def async_get_engine(hass, config):
    """Set up MaryTTS speech component."""
    return MaryTTSProvider(hass, config)


class MaryTTSProvider(Provider):
    """MaryTTS speech api provider."""

    def __init__(self, hass, conf):
        """Init MaryTTS TTS service."""
        # (method body continues beyond this chunk)
# Microsoft TTS platform: defaults, config schema, and engine setup.
DEFAULT_LANG = 'en-us'
DEFAULT_GENDER = 'Female'
DEFAULT_TYPE = 'ZiraRUS'
DEFAULT_OUTPUT = 'audio-16khz-128kbitrate-mono-mp3'
DEFAULT_RATE = 0
DEFAULT_VOLUME = 0
DEFAULT_PITCH = "default"
DEFAULT_CONTOUR = ""

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
    vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
    # Rate and volume are relative adjustments in percent (-100..100).
    vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): vol.All(vol.Coerce(int), vol.Range(-100, 100)),
    vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.string,
    vol.Optional(CONF_CONTOUR, default=DEFAULT_CONTOUR): cv.string,
})


def get_engine(hass, config, discovery_info=None):
    """Set up Microsoft speech component.

    Fix: accepts the optional ``discovery_info`` argument like the other
    platform ``get_engine`` entry points in this file (backward compatible —
    existing two-argument calls still work).
    """
    return MicrosoftProvider(
        config[CONF_API_KEY],
        config[CONF_LANG],
        config[CONF_GENDER],
        config[CONF_TYPE],
        config[CONF_RATE],
        config[CONF_VOLUME],
        config[CONF_PITCH],
        config[CONF_CONTOUR],
    )
# Home Assistant Cloud TTS fragment.
# (fragment: interior of a validate_lang-style validator; the enclosing `def`
# is outside this chunk, so the lines below keep their in-function indent)
    gender = value.get(CONF_GENDER)
    if gender is None:
        # Default to the first gender registered for the language in MAP_VOICE.
        gender = value[CONF_GENDER] = next(
            (chk_gender for chk_lang, chk_gender in MAP_VOICE if chk_lang == lang),
            None)

    if (lang, gender) not in MAP_VOICE:
        raise vol.Invalid("Unsupported language and gender specified.")

    return value


PLATFORM_SCHEMA = vol.All(
    PLATFORM_SCHEMA.extend({
        vol.Optional(CONF_LANG): str,
        vol.Optional(CONF_GENDER): str,
    }),
    validate_lang,
)


async def async_get_engine(hass, config, discovery_info=None):
    """Set up Cloud speech component."""
    cloud: Cloud = hass.data[DOMAIN]

    if discovery_info is not None:
        # Discovery provides no overrides; leave both unset.
        language = None
        gender = None
    else:
        language = config[CONF_LANG]
        gender = config[CONF_GENDER]
    # (function body continues beyond this chunk)
'John': '69010', #英语 '凯瑟琳': '69020', #英语 'Steve': '69030', #乔布斯、英语 '奥巴马': '69055', #英语、普通话 '小梅': '10003', #粤语 '玉儿': '68120', #台湾 '小强': '68010', #湖南 '小坤': '68030', #河南 '晓倩': '68040', #东北 '小蓉': '68060', #四川 '小莹': '68080', #陕西 } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES), vol.Optional(CONF_PERSON, default='小英'): cv.string, }) def get_engine(hass, config): lang = config.get(CONF_LANG) try: person = config.get(CONF_PERSON) except: person = '小英' tts_path = hass.config.path('tts') return iflytekTTS(lang, person, tts_path) class iflytekTTS(Provider):
# Google Translate TTS platform.
GOOGLE_SPEECH_URL = "http://translate.google.com/translate_tts"
# Presumably the maximum characters per request — confirm against the
# request-building code elsewhere in the file.
MESSAGE_SIZE = 148

SUPPORT_LANGUAGES = [
    'af', 'sq', 'ar', 'hy', 'bn', 'ca', 'zh', 'zh-cn', 'zh-tw', 'zh-yue',
    'hr', 'cs', 'da', 'nl', 'en', 'en-au', 'en-uk', 'en-us', 'eo', 'fi',
    'fr', 'de', 'el', 'hi', 'hu', 'is', 'id', 'it', 'ja', 'ko', 'la', 'lv',
    'mk', 'no', 'pl', 'pt', 'pt-br', 'ro', 'ru', 'sr', 'sk', 'es', 'es-es',
    'es-us', 'sw', 'sv', 'ta', 'th', 'tr', 'vi', 'cy',
]
DEFAULT_LANG = 'en'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
})


# NOTE(review): @asyncio.coroutine is deprecated (removed in Python 3.11);
# prefer a native `async def`.
@asyncio.coroutine
def async_get_engine(hass, config):
    """Set up Google speech component."""
    return GoogleProvider(hass, config[CONF_LANG])


class GoogleProvider(Provider):
    """The Google speech API provider."""

    def __init__(self, hass, lang):
        """Init Google TTS service."""
        self.hass = hass
        # (method body continues beyond this chunk)
# Amazon Polly platform fragment.
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'

# Default sample rate per output format (Hz, as strings for the API).
DEFAULT_SAMPLE_RATES = {
    'mp3': '22050',
    'ogg_vorbis': '22050',
    'pcm': '16000'
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS),
    # Key id and secret must be given together; a named profile is mutually
    # exclusive with explicit credentials.
    vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
    vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT): vol.In(SUPPORTED_OUTPUT_FORMATS),
    vol.Optional(CONF_SAMPLE_RATE): vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE): vol.In(SUPPORTED_TEXT_TYPES),
})


def get_engine(hass, config):
    """Set up Amazon Polly speech component."""
    # pylint: disable=import-error
    output_format = config.get(CONF_OUTPUT_FORMAT)
    # Fall back to the format's default sample rate when not configured.
    sample_rate = config.get(CONF_SAMPLE_RATE,
                             DEFAULT_SAMPLE_RATES[output_format])
    if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
        # (branch body continues beyond this chunk)
# VoiceRSS platform fragment (legacy coroutine style).
# (fragment: tail of the SUPPORT_FORMATS list; its opening is outside this chunk)
    'ulaw_8khz_stereo', 'ulaw_11khz_mono', 'ulaw_11khz_stereo',
    'ulaw_22khz_mono', 'ulaw_22khz_stereo', 'ulaw_44khz_mono',
    'ulaw_44khz_stereo',
]

CONF_CODEC = 'codec'
CONF_FORMAT = 'format'
DEFAULT_LANG = 'en-us'
DEFAULT_CODEC = 'mp3'
DEFAULT_FORMAT = '8khz_8bit_mono'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
    vol.Optional(CONF_FORMAT, default=DEFAULT_FORMAT): vol.In(SUPPORT_FORMATS),
})


# NOTE(review): @asyncio.coroutine is deprecated (removed in Python 3.11);
# prefer a native `async def`.
@asyncio.coroutine
def async_get_engine(hass, config):
    """Set up VoiceRSS speech component."""
    return VoiceRSSProvider(hass, config)


class VoiceRSSProvider(Provider):
    """VoiceRSS speech api provider."""

    def __init__(self, hass, conf):
        """Init VoiceRSS TTS service."""
        # (method body continues beyond this chunk)
# Peiyinge TTS platform fragment.
# (fragment: tail of a supported-languages list; its opening is outside this chunk)
    'zh',
]

CONF_PERSON_ID = 'person_id'
CONF_LANG = 'lang'
CONF_SPEED = 'speed'
CONF_VOLUME = 'volume'

# Token-signing endpoint and the synthesis URL template (params: timestamp,
# signature, voice id, speed, volume, url-encoded content).
TOKEN_API = 'http://www.peiyinge.com/make/getSynthSign'
TEXT2AUDIO_API_FMT = 'http://proxy.peiyinge.com:17063/synth?ts={0}&sign={1}&vid={2}&speed={3}&volume={4}&content={5}'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    # NOTE(review): vol.Required with default=None is contradictory — a
    # required key never falls back to its default; drop one or the other.
    vol.Required(CONF_PERSON_ID, default=None): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_SPEED, default='5'): cv.string,
    vol.Optional(CONF_VOLUME, default='5'): cv.string,
})


def get_engine(hass, config):
    """Set up the peiyinge TTS engine."""
    person_id = config.get(CONF_PERSON_ID)
    lang = config.get(CONF_LANG)
    speed = config.get(CONF_SPEED)
    volume = config.get(CONF_VOLUME)
    return PeiyingeTTS(lang, person_id, speed, volume)


class PeiyingeTTS(Provider):
    """Peiyinge TTS provider."""
# Baidu TTS platform: configuration keys, validated schema, and setup.
CONF_APP_ID = 'app_id'
CONF_SECRET_KEY = 'secret_key'
CONF_SPEED = 'speed'
CONF_PITCH = 'pitch'
CONF_VOLUME = 'volume'
CONF_PERSON = 'person'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Required(CONF_APP_ID): cv.string,
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_SECRET_KEY): cv.string,
    # Accepted ranges: speed/pitch 0-9, volume 0-15, person (voice id) 0-4.
    vol.Optional(CONF_SPEED, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=9)),
    vol.Optional(CONF_PITCH, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=9)),
    vol.Optional(CONF_VOLUME, default=5): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=15)),
    vol.Optional(CONF_PERSON, default=0): vol.All(
        vol.Coerce(int), vol.Range(min=0, max=4)),
})


def get_engine(hass, config):
    """Set up Baidu TTS component."""
    return BaiduTTSProvider(hass, config)


class BaiduTTSProvider(Provider):
    """Baidu TTS speech api provider."""
# MaryTTS platform fragment: defaults, codec map, schema, and setup.
DEFAULT_PORT = 59125
DEFAULT_LANG = "en_US"
DEFAULT_VOICE = "cmu-slt-hsmm"
DEFAULT_CODEC = "WAVE_FILE"
DEFAULT_EFFECTS = {}

# Maps MaryTTS codec names to file extensions.
MAP_MARYTTS_CODEC = {"WAVE_FILE": "wav", "AIFF_FILE": "aiff", "AU_FILE": "au"}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
    vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODEC),
    # Effects: mapping of supported effect name -> effect setting string.
    vol.Optional(CONF_EFFECT, default=DEFAULT_EFFECTS): {
        vol.All(cv.string, vol.In(SUPPORT_EFFECTS)): cv.string
    },
})


def get_engine(hass, config, discovery_info=None):
    """Set up MaryTTS speech component."""
    return MaryTTSProvider(hass, config)


class MaryTTSProvider(Provider):
    """MaryTTS speech provider."""
# VoiceRSS platform fragment (async style).
# (fragment: tail of the SUPPORT_FORMATS list; its opening is outside this chunk)
    "ulaw_44khz_mono",
    "ulaw_44khz_stereo",
]

CONF_CODEC = "codec"
CONF_FORMAT = "format"
DEFAULT_LANG = "en-us"
DEFAULT_CODEC = "mp3"
DEFAULT_FORMAT = "8khz_8bit_mono"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
        vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
        vol.Optional(CONF_FORMAT, default=DEFAULT_FORMAT): vol.In(SUPPORT_FORMATS),
    }
)


async def async_get_engine(hass, config, discovery_info=None):
    """Set up VoiceRSS TTS component."""
    return VoiceRSSProvider(hass, config)


class VoiceRSSProvider(Provider):
    """The VoiceRSS speech API provider."""

    def __init__(self, hass, conf):
        """Init VoiceRSS TTS service."""
        # (method body continues beyond this chunk)
# Home Assistant Cloud TTS fragment (variant with explicit defaults).
# (fragment: interior of a validate_lang-style validator; the enclosing `def`
# is outside this chunk, so the lines below keep their in-function indent)
    gender = value.get(CONF_GENDER)
    if gender is None:
        # Default to the first gender registered for the language in MAP_VOICE.
        gender = value[CONF_GENDER] = next(
            (chk_gender for chk_lang, chk_gender in MAP_VOICE if chk_lang == lang),
            None)

    if (lang, gender) not in MAP_VOICE:
        raise vol.Invalid("Unsupported language and gender specified.")

    return value


PLATFORM_SCHEMA = vol.All(
    PLATFORM_SCHEMA.extend({
        vol.Optional(CONF_LANG, default=DEFAULT_LANG): str,
        vol.Optional(CONF_GENDER): str,
    }),
    validate_lang,
)


async def async_get_engine(hass, config, discovery_info=None):
    """Set up Cloud speech component."""
    cloud: Cloud = hass.data[DOMAIN]

    if discovery_info is not None:
        # Discovered setup: fall back to the platform defaults.
        language = DEFAULT_LANG
        gender = DEFAULT_GENDER
    else:
        language = config[CONF_LANG]
        gender = config[CONF_GENDER]
    # (function body continues beyond this chunk)
# Amazon Polly platform fragment (variant).
# Default sample rate per output format (Hz, as strings for the API).
DEFAULT_SAMPLE_RATES = {
    'mp3': '22050',
    'ogg_vorbis': '22050',
    'pcm': '16000',
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(SUPPORTED_REGIONS),
    # Key id and secret must be given together; a named profile is mutually
    # exclusive with explicit credentials.
    vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
    vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT): vol.In(SUPPORTED_OUTPUT_FORMATS),
    vol.Optional(CONF_SAMPLE_RATE): vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE): vol.In(SUPPORTED_TEXT_TYPES),
})


def get_engine(hass, config):
    """Set up Amazon Polly speech component."""
    output_format = config.get(CONF_OUTPUT_FORMAT)
    # Fall back to the format's default sample rate when not configured.
    sample_rate = config.get(CONF_SAMPLE_RATE,
                             DEFAULT_SAMPLE_RATES[output_format])
    # (function body continues beyond this chunk)
# Microsoft TTS platform fragment (base variant).
# (fragment: tail of SUPPORTED_LANGUAGES; its opening is outside this chunk)
    'pt-pt', 'ro-ro', 'ru-ru', 'sk-sk', 'sv-se', 'th-th', 'tr-tr',
    'zh-cn', 'zh-hk', 'zh-tw',
]

GENDERS = [
    'Female',
    'Male',
]

DEFAULT_LANG = 'en-us'
DEFAULT_GENDER = 'Female'
DEFAULT_TYPE = 'ZiraRUS'
DEFAULT_OUTPUT = 'audio-16khz-128kbitrate-mono-mp3'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORTED_LANGUAGES),
    vol.Optional(CONF_GENDER, default=DEFAULT_GENDER): vol.In(GENDERS),
    vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
})


def get_engine(hass, config):
    """Set up Microsoft speech component."""
    return MicrosoftProvider(config[CONF_API_KEY], config[CONF_LANG],
                             config[CONF_GENDER], config[CONF_TYPE])


class MicrosoftProvider(Provider):
    """The Microsoft speech API provider."""

    def __init__(self, apikey, lang, gender, ttype):
        """Init Microsoft TTS service."""
        # (method body continues beyond this chunk)
# Local Pico TTS platform using the pico2wave command-line tool.
import tempfile
import shutil
import subprocess
import logging
import voluptuous as vol
from homeassistant.components.tts import Provider, PLATFORM_SCHEMA, CONF_LANG

_LOGGER = logging.getLogger(__name__)

SUPPORT_LANGUAGES = ['en-US', 'en-GB', 'de-DE', 'es-ES', 'fr-FR', 'it-IT']
DEFAULT_LANG = 'en-US'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
})


def get_engine(hass, config):
    """Set up Pico speech component."""
    # Bail out early (setup fails) when the pico2wave binary is not on PATH.
    if shutil.which("pico2wave") is None:
        _LOGGER.error("'pico2wave' was not found")
        return False
    return PicoProvider(config[CONF_LANG])


class PicoProvider(Provider):
    """The Pico TTS API provider."""

    def __init__(self, lang):
        """Initialize Pico TTS provider."""
        # (method body continues beyond this chunk)
DEFAULT_FORMAT = 'mp3' # wav|mp3|opus|flac DEFAULT_LANG = 'ru-RU' DEFAULT_PITCH = 50 # 0..100 DEFAULT_PORT = 8080 DEFAULT_RATE = 50 # 0..100 DEFAULT_VOICE = 'anna' DEFAULT_VOLUME = 50 # 0..100 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_FORMAT, default=DEFAULT_FORMAT): cv.string, vol.Optional(CONF_PITCH, default=DEFAULT_PITCH): cv.positive_int, vol.Optional(CONF_RATE, default=DEFAULT_RATE): cv.positive_int, vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string, vol.Optional(CONF_VOLUME, default=DEFAULT_VOLUME): cv.positive_int, }) async def async_get_engine(hass, config, discovery_info=None): """Set up RHVoice speech component.""" return RHVoiceProvider(hass, config) class RHVoiceProvider(Provider):