def handle(self, handler_input):
    """Close the session with a 'see you next watering' message plus cards.

    Reads the persisted cactus height, renders a matching image on APL
    devices, and attaches a standard card for non-APL devices.
    """
    persistent = handler_input.attributes_manager.persistent_attributes
    cactus_height = persistent.get('height', 0)

    # Build the farewell SSML.
    farewell = Speech()
    farewell.add_text('次は水を上げて欲しいな。楽しみに待っています。')
    ssml_output = farewell.speak()

    # Image chosen from the growth level and a (random) scene.
    image_url = utils.ImageGetter().get_image(
        utils.get_level(cactus_height), utils.select_scene())
    apl_image = Image(sources=[ImageInstance(url=image_url)])
    card_title = data.SKILL_NAME
    body_text = get_plain_text_content(primary_text="")

    if utils.supports_apl(handler_input):
        directive = RenderTemplateDirective(
            BodyTemplate2(
                back_button=BackButtonBehavior.VISIBLE,
                image=apl_image,
                title=card_title,
                text_content=body_text,
            )
        )
        handler_input.response_builder.add_directive(
            directive).set_should_end_session(True)

    handler_input.response_builder.set_card(
        ui.StandardCard(
            title=data.get_standard_card_title(cactus_height),
            text=data.get_standard_card_text(cactus_height),
            image=ui.Image(
                small_image_url=image_url,
                large_image_url=image_url,
            ),
        )
    )
    return handler_input.response_builder.speak(ssml_output).response
def test_whisper(self):
    """Nested whisper() returns the bare amazon:effect element."""
    builder = Speech()
    rendered = builder.whisper("I am not a real human.", is_nested=True)
    expected = '<amazon:effect name="whispered">I am not a real human.</amazon:effect>'
    self.assertEqual(rendered, expected)
def test_voice(self):
    """Nested voice() wraps the text in a <voice> element with the name."""
    builder = Speech()
    rendered = builder.voice(value="I am not a real human.",
                             name="Kendra",
                             is_nested=True)
    self.assertEqual(rendered,
                     '<voice name="Kendra">I am not a real human.</voice>')
def test_sub(self):
    """Nested sub() renders <sub> with the alias attribute, per call."""
    builder = Speech()
    cases = [
        ("Al", "aluminum", '<sub alias="aluminum">Al</sub>'),
        ("Mg", "magnesium", '<sub alias="magnesium">Mg</sub>'),
    ]
    for value, alias, expected in cases:
        rendered = builder.sub(value=value, alias=alias, is_nested=True)
        self.assertEqual(rendered, expected)
def test_audio(self):
    """Nested audio() emits a self-closing <audio> tag with the src URL."""
    builder = Speech()
    source = 'soundbank://soundlibrary/transportation/amzn_sfx_car_accelerate_01'
    rendered = builder.audio(source, is_nested=True)
    expected = ('<audio src="soundbank://soundlibrary/transportation/'
                'amzn_sfx_car_accelerate_01" />')
    self.assertEqual(rendered, expected)
def test_pause(self):
    """pause() emits <break/>: nested form returns the bare tag,
    non-nested form appends into the running speech."""
    builder = Speech()

    # Nested call: the tag alone is returned.
    self.assertEqual(builder.pause(time="3s", is_nested=True),
                     '<break time="3s"/>')

    # Non-nested call: the break is embedded between the text chunks.
    builder.add_text("There is a three second pause here ")
    builder.pause(time="3s")
    builder.add_text("then the speech continues.")
    expected = ('<speak>There is a three second pause here <break time="3s"/>'
                'then the speech continues.</speak>')
    self.assertEqual(builder.speak(), expected)
def test_emphasis(self):
    """emphasis() wraps the phrase in an <emphasis> tag with the level."""
    builder = Speech()
    builder.add_text('I already told you I ')
    builder.emphasis('really like', 'strong')
    builder.add_text(' that person')
    expected = ('<speak>I already told you I '
                '<emphasis level="strong">really like</emphasis>'
                ' that person</speak>')
    self.assertEqual(builder.speak(), expected)
def test_prosody(self):
    """prosody() fills rate/pitch/volume, defaulting each to "medium"."""
    builder = Speech()
    cases = [
        (dict(value="all medium"),
         '<prosody rate="medium" pitch="medium" volume="medium">all medium</prosody>'),
        (dict(value="x-fast", rate='x-fast'),
         '<prosody rate="x-fast" pitch="medium" volume="medium">x-fast</prosody>'),
        (dict(value="slow x-high", rate='slow', pitch='x-high'),
         '<prosody rate="slow" pitch="x-high" volume="medium">slow x-high</prosody>'),
        (dict(value="70% low x-soft", rate='70%', pitch='+50%', volume='x-soft'),
         '<prosody rate="70%" pitch="+50%" volume="x-soft">70% low x-soft</prosody>'),
    ]
    for kwargs, expected in cases:
        self.assertEqual(builder.prosody(is_nested=True, **kwargs), expected)
def test_p(self):
    """p() wraps each chunk in <p> tags inside the final <speak> output."""
    builder = Speech()
    builder.p("This is the first paragraph. There should be a pause after this text is spoken.")
    builder.p("This is the second paragraph.")
    expected = ('<speak><p>This is the first paragraph. There should be a pause after'
                ' this text is spoken.</p><p>This is the second paragraph.</p></speak>')
    self.assertEqual(builder.speak(), expected)
def handle(self, handler_input):
    """Dispatch on the session 'status' attribute set by an earlier turn.

    Branches:
      - 'confirmation':    water the cactus, grow it, persist the new height.
      - 'check_status':    report the current height only.
      - 'isp_better_water': hand off to the in-skill-purchase BuyHandler.
      - anything else:     end the session with the generic error message.
    """
    status = handler_input.attributes_manager.session_attributes.get('status')
    attr = handler_input.attributes_manager.persistent_attributes
    if status == 'confirmation':
        # ---- Watering confirmed: compute and persist new growth. ----
        before_height = attr.get('height')
        # NOTE(review): 'resposne' looks like a typo, but the name must match
        # the helper defined in utils — left as-is; fix both together.
        in_skill_response = utils.in_skill_product_resposne(handler_input)
        logger.info(f'in_skill_response: {in_skill_response}')
        grow_rate_of_up = 1.0
        if utils.is_subscriptable(handler_input):
            # Subscribers ("better water") grow 1.5x faster.
            grow_rate_of_up = 1.5
        grow = utils.get_grow_value() * Decimal(grow_rate_of_up)
        # First watering has no prior height: start from the growth amount.
        grow_height = before_height + grow if before_height else grow
        attr['height'] = grow_height
        attr['last_watered_date'] = str(datetime.date.today())
        handler_input.attributes_manager.persistent_attributes = attr
        handler_input.attributes_manager.save_persistent_attributes()

        speech = Speech()
        speech.audio(utils.get_sound_url(data.SOUND_FLOWER))
        if not before_height:
            # Very first watering: celebratory message.
            speech.add_text(f"""
                <say-as interpret-as="interjection">わ〜い</say-as>。
                とっても可愛いですね!
                全長は{grow_height}ミリメートルです。
                明日の様子も楽しみですね。
            """)
        else:
            speech.add_text(f"""
                水やりが終わりました。
                {grow}ミリ成長しました!
                全長は{grow_height}ミリメートルです。
                明日も成長が楽しみですね。
            """)
        speech_text = speech.speak()

        # Image and card reflect the new (post-watering) height.
        cactus_image = utils.ImageGetter().get_image(
            utils.get_level(grow_height), utils.select_scene()
        )
        ret_img = Image(sources=[ImageInstance(url=cactus_image)])
        title = data.SKILL_NAME
        primary_text = get_plain_text_content(primary_text="")
        if utils.supports_apl(handler_input):
            handler_input.response_builder.add_directive(
                RenderTemplateDirective(
                    BodyTemplate2(
                        back_button=BackButtonBehavior.VISIBLE,
                        image=ret_img,
                        title=title,
                        text_content=primary_text
                    ))).set_should_end_session(True)
        handler_input.response_builder.set_card(
            ui.StandardCard(
                title=data.get_standard_card_title(grow_height),
                text=data.get_standard_card_text(grow_height),
                image=ui.Image(
                    small_image_url=cactus_image,
                    large_image_url=cactus_image
                )
            )
        )
        return handler_input.response_builder.speak(speech_text).response
    elif status == 'check_status':
        # ---- Status check only: no growth, no persistence. ----
        height = utils.get_cactus_height(handler_input)
        speech = Speech()
        speech.add_text(f'全長は{height}ミリメートルです。'
                        f'明日も成長が楽しみですね。')
        speech_text = speech.speak()
        cactus_image = utils.ImageGetter().get_image(
            utils.get_level(height), utils.select_scene()
        )
        ret_img = Image(sources=[ImageInstance(url=cactus_image)])
        primary_text = get_plain_text_content(primary_text='')
        if utils.supports_apl(handler_input):
            handler_input.response_builder.add_directive(
                RenderTemplateDirective(
                    BodyTemplate2(
                        back_button=BackButtonBehavior.VISIBLE,
                        image=ret_img,
                        title=data.SKILL_NAME,
                        text_content=primary_text
                    )
                )
            ).set_should_end_session(True)
        handler_input.response_builder.set_card(
            ui.StandardCard(
                title=data.get_standard_card_title(height),
                text=data.get_standard_card_text(height),
                image=ui.Image(
                    small_image_url=cactus_image,
                    large_image_url=cactus_image
                )
            )
        )
        return handler_input.response_builder.speak(speech_text).response
    elif status == 'isp_better_water':
        # Delegate the purchase flow to the dedicated handler.
        return BuyHandler().handle(handler_input)
    else:
        # Unknown/expired session state: apologize and end the session.
        speech_text = data.ERROR_SESSION_END_MESSAGE
        handler_input.response_builder.speak(speech_text).set_card(
            SimpleCard(data.SKILL_NAME, speech_text)).set_should_end_session(
            True)
        return handler_input.response_builder.response
def test_lang(self):
    """Nested lang() wraps the text in a <lang> element with xml:lang."""
    builder = Speech()
    rendered = builder.lang(value="Paris", lang="fr-FR", is_nested=True)
    self.assertEqual(rendered, '<lang xml:lang="fr-FR">Paris</lang>')
from django_ask_sdk.skill_adapter import SkillAdapter
from phonetisch import soundex
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from ssml_builder.core import Speech

from alexa.models import LanguageModel, Intent
from assistant import intents
from assistant.constants import NO_CACHE_HEADERS
from assistant.permissions import AuthorizedAgentPermission
from assistant.utility_functions import request_to_dict, intent_response

# NOTE(review): `logging` and `SkillBuilder` are used below but not imported
# in this excerpt — presumably imported elsewhere in the file; verify.
logger = logging.getLogger(__name__)

# Module-level SSML builder and Alexa skill builder shared by the views.
speech = Speech()
sb = SkillBuilder()


@api_view(["GET", "POST"])
@permission_classes((AuthorizedAgentPermission,))
def intent_responder(request):
    """
    Reads an intent request (as sent by Alexa Web services) and responds
    with an intent response.

    :param request: incoming HTTP request carrying the Alexa intent payload
    :return: the intent response (built in the remainder of this view,
             which continues beyond this excerpt)
    """
    resp = {}
    data = request_to_dict(request)
def __init__(self):
    """Initialize the handler with a fresh SSML Speech builder."""
    self.speech = Speech()
class LaunchResponseCreator:
    """Builds the SSML for the skill's launch response.

    The response depends on the cactus state: a seed prompt when nothing has
    been planted yet, a watering prompt when watering is allowed today, and
    otherwise an upsell (non-subscribers) or a "come back tomorrow" message.
    """

    def __init__(self):
        self.speech = Speech()

    def create_response(self, height, last_watered_date, handler_input):
        """Return the full launch SSML for the given cactus state.

        :param height: current cactus height in millimetres (0 = no cactus)
        :param last_watered_date: date string of the last watering
        :param handler_input: ASK SDK handler input for capability checks
        :return: SSML string wrapped in <speak> tags
        """
        self.pre_response()
        if height == 0:
            return self.seed_response()
        self.base_launch_response(height)
        if utils.can_water(last_watered_date):
            return self.can_water_response(handler_input)
        return self.cannot_water_response(height, handler_input)

    def pre_response(self):
        # Debug marker spoken only on the unpublished ($LATEST) Lambda version.
        if os.environ.get('AWS_LAMBDA_FUNCTION_VERSION') == '$LATEST':
            self.speech.add_text('デブです。')

    def seed_response(self):
        """Invite the user to plant the cactus seed (no cactus exists yet)."""
        self.speech.add_text('そういえば、さぼてんの種を拾ったんですよね。'
                             '一緒に育てませんか?')
        return self.speech.speak()

    def base_launch_response(self, height):
        """Append the standard greeting with the current height."""
        self.speech.add_text('はい、きみのサボテンはこちらです。'
                             f'全長は{height}ミリメートルです。')
        return self.speech.speak()

    def can_water_response(self, handler_input):
        """Ask whether to water; subscribers are offered the premium water."""
        speech = 'さぼてんに水をあげますか?'
        if utils.is_subscriptable(handler_input):
            # TODO: can this be routed to BuyIntentHandler instead?
            # return GetFactHandler().handle(handler_input)
            speech = 'さぼてんに綺麗な水をあげますか?'
        self.speech.add_text(speech)
        return self.speech.speak()

    def cannot_water_response(self, height, handler_input):
        """Already watered today: upsell non-subscribers, else say 'tomorrow'.

        (`height` is currently unused but kept for interface stability.)
        """
        # TODO: ISP support — upsell only when the user has not purchased.
        if not utils.is_subscriptable(handler_input):
            self.speech.add_text('綺麗な水を購入して、さぼてんの成長をもっと早くしますか?')
            # 'さぼてんの形がどんどん変わっていくのを見るのって楽しいですよね。'
        else:
            # Pronunciation hint: read 明日 as あした.
            tomorrow = self.speech.sub(value="明日", alias="あした", is_nested=True)
            self.speech.add_text(f'また{tomorrow}水やりしてくださいね。')
        return self.speech.speak()
def get(self):
    """Render self.text as an SSML document wrapped in <speak> tags."""
    builder = Speech()
    builder.add_text(self.text)
    return builder.speak()
def test_say_as(self):
    """say_as() renders <say-as interpret-as="..."> nested and appended."""
    builder = Speech()

    # Nested calls return the bare element and do not touch the buffer.
    nested_cases = (('12345', 'cardinal'),
                    ('12345', 'digits'),
                    ('hello', 'spell-out'))
    for value, interpretation in nested_cases:
        rendered = builder.say_as(value=value,
                                  interpret_as=interpretation,
                                  is_nested=True)
        self.assertEqual(
            rendered,
            '<say-as interpret-as="{}">{}</say-as>'.format(interpretation,
                                                           value))

    # Non-nested calls accumulate into the <speak> document.
    builder.say_as(value='hello', interpret_as='spell-out')
    self.assertEqual(
        builder.speak(),
        '<speak><say-as interpret-as="spell-out">hello</say-as></speak>')
    builder.say_as(value='hello', interpret_as='spell-out')
    self.assertEqual(
        builder.speak(),
        '<speak><say-as interpret-as="spell-out">hello</say-as>'
        '<say-as interpret-as="spell-out">hello</say-as></speak>')
from AssitentTypeEnum import AssitentTypeEnum
from Assistent import Assistent
from ssml_builder.core import Speech
import random

# Exercise phrases to synthesize.
# NOTE(review): `random` is imported but unused in this excerpt — presumably
# used elsewhere; verify before removing.
exercicios = [
    "Biiiiiiiiiiiiixaaaaaaaaaaaaaaaaaaa não",
]

# Build the SSML: each phrase is followed by a one-second pause.
speech = Speech()
for item in exercicios:
    speech.add_text(item)
    speech.pause(time="1s")

# Synthesize with the AWS backend and write the MP3 named after the backend.
type_asistente = AssitentTypeEnum.AWS
assist = Assistent.factory(type_asistente)
mp3 = assist.synthesize_speech(speech.speak())
with open("out/{}.mp3".format(type_asistente.name), 'wb') as out:
    out.write(mp3)
"Mexer o pescoço para baixo e para cima 15 segundos",
    "Girando as mãos 15 segundos",
    "Alongando os dedos 15 segundos",
    "Alongando as pernas 15 segundos",
    "Flexionando as pernas 15 segundos",
    "Joelho na frente 15 segundos",
    "Flexionar o Joelho 15 segundos"
]

# Rep counters spoken in two halves of the routine.
half_1 = [1, 2, 3, 4, 5, 6, 7]
half_2 = [8, 9, 10, 11, 12, 13, 14, 15]

# Motivational interjections; empty strings produce silent picks.
mylist = [
    "Qilson",
    "O que não te desafia não faz você mudar!",
    "", "", "", "",
    # NOTE(review): no comma after the next literal — it is implicitly
    # concatenated with the following "" into a single string, so the list
    # has one fewer element than it appears. Confirm whether intended.
    "O corpo alcança o que a mente acredita."
    "",
    "", "", ""
]

# Build the SSML: read each exercise, then count the first half of reps,
# interject a random motivational phrase, then count the second half.
speech = Speech()
for item in exercicios:
    speech.add_text(item)
    speech.pause(time="2s")
    for n in half_1:
        speech.add_text(str(n))
        speech.pause(time="1s")
    speech.add_text(random.choice(mylist))
    speech.pause(time="1s")
    for n in half_2:
        speech.add_text(str(n))
        speech.pause(time="1s")