def bind(self, bus):
    """ Overrides the normal bind method.

        Adds handlers for play:query and play:start messages allowing
        interaction with the playback control skill.

        This is called automatically during setup, and
        need not otherwise be used.
    """
    if bus:
        super().bind(bus)
        # Shared audio service used to start/stop playback on this bus.
        self.audioservice = AudioService(self.bus)
        # Private handlers (name-mangled) for the playback-control protocol.
        self.add_event('play:query', self.__handle_play_query)
        self.add_event('play:start', self.__handle_play_start)
class AntoniaSkill(MycroftSkill):
    """Record a spoken question, POST it to a remote server as JSON and
    play back the mp3 answer the server produces."""

    def __init__(self):
        # The constructor of the skill, which calls MycroftSkill's constructor.
        super(AntoniaSkill, self).__init__("AntoniaSkill")
        # Initialize working variables used within the skill.
        self.count = 0
        # This will be the json to send to the server.
        self.jsonTest = {
            "user": "******",
        }
        # Config variables
        self.JSON_PATH = '/home/pi/Antonia/assets/json/request.json'
        self.AUDIO_PATH = "/home/pi/answer/answer.mp3"
        self.REQUEST_JSON = 'request.json'
        self.NGROK_ROUTE = 'https://projectantonia.ngrok.io/test/message'

    def initialize(self):
        # Creating "I have a question" intent.
        i_have_a_question = IntentBuilder("IHaveAQuestion").require(
            "IHaveAQuestion").build()
        self.register_intent(i_have_a_question,
                             self.handle_i_have_a_question_intent)
        self.audio_service = AudioService(self.bus)

    @intent_handler(IntentBuilder("").require("IHaveAQuestion"))
    def handle_i_have_a_question_intent(self, message):
        """Listen for the follow-up question and forward it to the server."""
        self.speak_dialog("i.have.a.question")
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            # Level the microphone according to the ambient noise.
            recognizer.adjust_for_ambient_noise(source, duration=1)
            audio = recognizer.listen(source, timeout=None)
        question = recognizer.recognize_google(audio, language="es-ES")
        # BUG FIX: these helpers are instance methods; the original called
        # them as bare names, which raises NameError at runtime.
        self.add_atributes_to_json(question)
        self.generate_json()
        self.execute_curl(self.REQUEST_JSON, self.NGROK_ROUTE)
        self.play_mp3(self.AUDIO_PATH)
        os.remove(self.AUDIO_PATH)

    def add_atributes_to_json(self, request):
        # Attach the recognized text to the outgoing payload.
        self.jsonTest["text"] = request

    def generate_json(self):
        # Serialize the payload to the request file that execute_curl sends.
        with open(self.JSON_PATH, 'w') as outfile:
            json.dump(self.jsonTest, outfile)

    def execute_curl(self, jsonName, tunnelUrl):
        # POST the request file to the ngrok tunnel.
        os.system(
            'curl -H "Content-Type: application/json" -d @/home/pi/Antonia/assets/json/'
            + jsonName + ' ' + tunnelUrl)

    def play_mp3(self, audio_path):
        """Poll (every 3 s) until the answer file exists, then play it."""
        while not os.path.exists(audio_path):
            time.sleep(3)
        self.audio_service.play(audio_path)
        # NOTE(review): mpg123 is also spawned on the same file, so the
        # answer may play twice — confirm which playback path is intended.
        os.system('mpg123 ' + audio_path)
def initialize(self):
    """Set up the skill: attach the audio service and the visual state."""
    self.audio_service = AudioService(self.emitter)
    # NOTE(review): set_visual is defined elsewhere in this skill —
    # presumably it prepares the enclosure/display; confirm.
    self.set_visual()
class RadioSkill(MycroftSkill):
    """Play German public radio streams (DLF, DLF Kultur, DLF Nova) and
    announce what is currently on air."""

    def __init__(self):
        super(RadioSkill, self).__init__(name="RadioSkill")
        self.audioservice = None

    def initialize(self):
        if AudioService:
            self.audioservice = AudioService(self.emitter)
        whatson_dlf_intent = IntentBuilder("WhatsonDlfIntent"). \
            require("WhatsonKeyword"). \
            require("DlfKeyword").build()
        self.register_intent(whatson_dlf_intent,
                             self.handle_whatson_dlf_intent)
        whatson_dradio_intent = IntentBuilder("WhatsonDradioIntent"). \
            require("WhatsonKeyword"). \
            require("DradioKeyword").build()
        self.register_intent(whatson_dradio_intent,
                             self.handle_whatson_dradio_intent)
        whatson_nova_intent = IntentBuilder("WhatsonNovaIntent"). \
            require("WhatsonKeyword"). \
            require("NovaKeyword").build()
        self.register_intent(whatson_nova_intent,
                             self.handle_whatson_nova_intent)
        dlf_intent = IntentBuilder("DlfIntent"). \
            require("DlfKeyword").require("PlayKeyword").build()
        self.register_intent(dlf_intent, self.handle_dlf_intent)
        dradio_intent = IntentBuilder("DradioIntent"). \
            require("DradioKeyword").require("PlayKeyword").build()
        self.register_intent(dradio_intent, self.handle_dradio_intent)
        nova_intent = IntentBuilder("NovaIntent"). \
            require("NovaKeyword").require("PlayKeyword").build()
        self.register_intent(nova_intent, self.handle_nova_intent)

    def _speak_now_playing(self, url, element_id, station_name):
        """Scrape *url* for the 'now on air' element and speak its title."""
        r = requests.get(url)
        # Explicit parser avoids bs4's GuessedAtParserWarning and makes the
        # result independent of which parser libraries are installed.
        soup = BeautifulSoup(r.text, 'html.parser')
        for el in soup.find_all(id=element_id):
            for a_el in el.find_all('a'):
                self.speak_dialog("currently", {
                    "station": station_name,
                    "title": a_el.string
                })

    def handle_whatson_dlf_intent(self, message):
        self._speak_now_playing('http://www.deutschlandfunk.de',
                                'dlf-player-jetzt-im-radio', 'dlf')

    def handle_whatson_dradio_intent(self, message):
        self._speak_now_playing('http://www.deutschlandfunkkultur.de/',
                                'drk-player-jetzt-im-radio', 'dlf culture')

    def handle_whatson_nova_intent(self, message):
        # Nova exposes a JSON now-playing endpoint instead of HTML.
        r = requests.get(
            'https://www.deutschlandfunknova.de/actions/dradio/playlist/onair')
        j = r.json()
        self.speak_dialog("currently", {
            "station": "dlf nova",
            "title": j['show']['title']
        })

    def _play_stream(self, url, message):
        """Play *url* via the audio service, falling back to play_mp3."""
        if self.audioservice:
            self.audioservice.play(url, message.data['utterance'])
        else:
            self.process = play_mp3(url)

    def handle_dlf_intent(self, message):
        self._play_stream(DLF_URL, message)

    def handle_dradio_intent(self, message):
        self._play_stream(DRADIO_URL, message)

    def handle_nova_intent(self, message):
        self._play_stream(NOVA_URL, message)

    def stop(self):
        pass
class EasterEggsSkill(MycroftSkill):
    """Assorted pop-culture easter eggs: stardates, HAL, GLaDOS,
    Duke Nukem, the laws of robotics and friends."""

    def __init__(self):
        super(EasterEggsSkill, self).__init__()

    def initialize(self):
        stardate_intent = IntentBuilder("StardateIntent").\
            require("StardateKeyword").build()
        self.register_intent(stardate_intent, self.handle_stardate_intent)
        intent = IntentBuilder("PodBayDoorsIntent"). \
            require("PodBayDoorsKeyword").build()
        self.register_intent(intent, self.handle_pod_intent)
        intent = IntentBuilder("LanguagesYouSpeakIntent"). \
            require("LanguagesYouSpeakKeyword").build()
        self.register_intent(intent, self.handle_number_of_languages_intent)
        intent = IntentBuilder("RoboticsLawsIntent"). \
            require("RoboticsKeyword").require("LawKeyword")\
            .optionally("LawOfRobotics").build()
        self.register_intent(intent, self.handle_robotic_laws_intent)
        intent = IntentBuilder("rock_paper_scissors_lizard_spockIntent"). \
            require("rock_paper_scissors_lizard_spock_Keyword").build()
        self.register_intent(
            intent, self.handle_rock_paper_scissors_lizard_spock_intent)
        intent = IntentBuilder("GladosIntent"). \
            require("GladosKeyword").build()
        self.register_intent(intent, self.handle_glados_intent)
        intent = IntentBuilder("DukeNukemIntent"). \
            require("DukeNukemKeyword").build()
        self.register_intent(intent, self.handle_dukenukem_intent)
        intent = IntentBuilder("HALIntent"). \
            require("HALKeyword").build()
        self.register_intent(intent, self.handle_hal_intent)
        self.audio_service = AudioService(self.emitter)

    def handle_stardate_intent(self, message):
        sd = Stardate().toStardate()
        self.speak_dialog("stardate", {"stardate": sd})

    def handle_pod_intent(self, message):
        self.speak_dialog("pod")

    def handle_robotic_laws_intent(self, message):
        """Recite a single law of robotics, or all three by default."""
        law = str(message.data.get("LawOfRobotics", "all"))
        if law == "1":
            self.speak_dialog("rule1")
        elif law == "2":
            self.speak_dialog("rule2")
        elif law == "3":
            self.speak_dialog("rule3")
        else:
            # Anything else (including the default "all"): all three laws.
            self.speak_dialog("rule1")
            self.speak_dialog("rule2")
            self.speak_dialog("rule3")

    def handle_rock_paper_scissors_lizard_spock_intent(self, message):
        self.speak_dialog("rock_paper_scissors_lizard_spock")

    def handle_number_of_languages_intent(self, message):
        self.speak_dialog("languages")

    def _random_sound_file(self, subdir, extension):
        """Pick a random file matching *extension* from sounds/<subdir>.

        Returns the full path, or None when no matching file exists.
        The original three handlers were copy-paste triplicates of this.
        (Kept the substring match of the original rather than endswith.)
        """
        path = dirname(__file__) + "/sounds/" + subdir
        files = [f for f in listdir(path) if extension in f]
        if files:
            return path + "/" + random.choice(files)
        return None

    def handle_glados_intent(self, message):
        mp3 = self._random_sound_file("portal", ".mp3")
        if mp3:
            self.audio_service.play(mp3)
        else:
            self.speak_dialog("bad_file")

    def handle_hal_intent(self, message):
        mp3 = self._random_sound_file("hal", ".mp3")
        if mp3:
            self.audio_service.play(mp3)
        else:
            self.speak_dialog("bad_file")

    def handle_dukenukem_intent(self, message):
        wav = self._random_sound_file("dukenukem", ".wav")
        if wav:
            # Duke's clips are wav files; played directly, not via the bus.
            play_wav(wav)
        else:
            self.speak_dialog("bad_file")

    def stop(self):
        pass
def initialize(self):
    # AudioService may be falsy when unavailable — presumably the import is
    # guarded with a try/except elsewhere in this file; confirm.
    if AudioService:
        self.audioservice = AudioService(self.emitter)
class TranslateSkill(MycroftSkill):
    """Translate spoken phrases via Google Translate, fetch the TTS audio
    for the translation and play it back."""

    def __init__(self):
        super(TranslateSkill, self).__init__('TranslateSkill')
        self.language = self.lang
        self.process = None
        # Scratch file that say() downloads TTS audio into.
        self.path_translated_file = "/tmp/translated.mp3"

    def initialize(self):
        self.tts = ConfigurationManager.get().get("tts").get("module")
        self.audioservice = AudioService(self.emitter)
        intent = IntentBuilder('HowUseIntent')\
            .require('HowUseKeyword') \
            .require('SkillNameKeyword') \
            .build()
        self.register_intent(intent, self.handle_how_use)

    @intent_handler(
        IntentBuilder("TranslateIntent").require("TranslateKeyword").require(
            'ToKeyword').require('LanguageNameKeyword').require(
                'phrase').build())
    @adds_context("TranslateContext")
    def handle_translate_intent(self, message):
        """'translate <phrase> to <language>'."""
        lang = message.data.get("LanguageNameKeyword")
        sentence = message.data.get("phrase")
        translated = translate(sentence, lang)
        self.say(translated, lang)

    @intent_handler(
        IntentBuilder("TranslateToIntent").require("TranslateKeyword").require(
            'ToKeyword').require('translate').require(
                'LanguageNameKeyword').build())
    @adds_context("TranslateContext")
    def handle_translate_to_intent(self, message):
        """Alternate phrasing of the translate intent."""
        lang = message.data.get("LanguageNameKeyword")
        sentence = message.data.get("translate")
        translated = translate(sentence, lang)
        self.say(translated, lang)

    @intent_handler(
        IntentBuilder("RepeatTranslate").require('RepeatKeyword').require(
            "TranslateContext").build())
    def handle_repeat_translate(self, message):
        """Replay the most recently translated audio file."""
        self.emitter.emit(Message('recognizer_loop:mute_mic'))
        self.emitter.emit(Message('recognizer_loop:audio_output_start'))
        time.sleep(1)
        wait_while_speaking()
        self.audioservice.play(self.path_translated_file)
        self.emitter.emit(Message('recognizer_loop:unmute_mic'))
        self.emitter.emit(Message('recognizer_loop:audio_output_end'))

    @intent_handler(
        IntentBuilder("OthersLanguagesIntent").require("SpeakKeyword").require(
            "LanguageKeyword").build())
    @adds_context("OthersLanguagesContext")
    def handle_others_languages(self, message):
        """Ask which phrase should be demonstrated in every language."""
        self.speak_dialog("yes.ask", None, expect_response=True)

    @intent_handler(
        IntentBuilder("OtherLanguageTranslateIntent").require(
            "OthersLanguagesContext").build())
    def handle_other_language_translate(self, message):
        """Speak the user's phrase in every supported language in turn."""
        resp = message.data.get("utterance")
        langs = [
            "en|english", "es|spanish", "it|italian", "fr|french", "nl|dutch",
            "de|german", "pl|polish", "pt|portuguese", "da|danish",
            "hu|hungarian", "sv|swedish", "no|norwegian", "ca|catalan",
            "ro|romanian", "sk|slovak", "zh-TW|chinese", "ja|japanese",
            "ko|korean", "el|greek", "vi|vietnamese", "tr|turkish",
            "fi|finnish", "ar|arabic"
        ]
        language = self.language
        self.emitter.emit(Message('recognizer_loop:mute_mic'))
        # NOTE(review): the mic is muted here but never unmuted in this
        # handler — confirm whether that is handled elsewhere.
        for entry in langs:
            code, name = entry.split("|")
            # BUG FIX: the original compared the *list* returned by split()
            # against the language string, so the skill's own language was
            # never actually skipped.
            if code == language:  # NOTE(review): self.lang may be 'en-us'
                print("*****Skip language.....")
                continue
            self.enclosure.mouth_text(name[:6])
            translated = translate(resp, code)
            self.say(translated, code)
            # Wait for the clip to finish before moving to the next one.
            audio_file = MP3(self.path_translated_file)
            time.sleep(audio_file.info.length)
        self.speak_dialog("what.did.you.think")

    def handle_how_use(self, message):
        self.speak_dialog("how.use")

    def say(self, sentence, lang):
        """Download Google-Translate TTS audio for *sentence* and play it."""
        from urllib.parse import quote  # stdlib; local to leave file imports untouched
        self.enclosure.deactivate_mouth_events()
        # BUG/SECURITY FIX: the sentence is user-controlled speech; quote it
        # so it cannot break the URL query or the shell command line.
        url = ("https://translate.google.com/translate_tts?ie=UTF-8&tl=" +
               quote(str(lang)) + "&q=" + quote(str(sentence)) +
               "&client=tw-ob")
        os.system('wget -q -U Mozilla -O /tmp/translated.mp3 "' + url + '"')
        wait_while_speaking()
        self.audioservice.play(self.path_translated_file)
        # Block until the clip has played out.
        audio_file = MP3(self.path_translated_file)
        time.sleep(audio_file.info.length)
        self.enclosure.activate_mouth_events()
        self.enclosure.mouth_reset()

    def stop(self):
        if self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
def initialize(self):
    """Initialize playback control: load data files and grab the audio service."""
    logger.info('initializing Playback Control Skill')
    super(PlaybackControlSkill, self).initialize()
    self.load_data_files(dirname(__file__))
    self.audio_service = AudioService(self.emitter)
class RadioChannelSkill(MycroftSkill):
    """Tune to one of four radio stations (module-level URLS/NAME tables)
    and cycle through them on request."""

    def __init__(self):
        super(RadioChannelSkill, self).__init__(name="RadioChannelSkill")
        self.audioservice = None

    def initialize(self):
        if AudioService:
            self.audioservice = AudioService(self.emitter)
        random_intent = IntentBuilder("RandomIntent"). \
            require("TurnKeyword").require("RadioKeyword").require(
                "OnKeyword").build()
        self.register_intent(random_intent, self.handle_random_intent)
        dlf_intent = IntentBuilder("DlfIntent").\
            require("DlfKeyword").require("TurnKeyword").require(
                "RadioKeyword").require("OnKeyword").build()
        self.register_intent(dlf_intent, self.handle_dlf_intent)
        dradio_intent = IntentBuilder("DradioIntent").\
            require("DradioKeyword").require("TurnKeyword").require(
                "RadioKeyword").require("OnKeyword").build()
        self.register_intent(dradio_intent, self.handle_dradio_intent)
        nova_intent = IntentBuilder("NovaIntent").\
            require("NovaKeyword").require("TurnKeyword").require(
                "RadioKeyword").require("OnKeyword").build()
        self.register_intent(nova_intent, self.handle_nova_intent)
        energyhh_intent = IntentBuilder("EnergyHHIntent"). \
            require("EnergyHHKeyword").require("TurnKeyword").require(
                "RadioKeyword").require("OnKeyword").build()
        self.register_intent(energyhh_intent, self.handle_energyhh_intent)
        change_intent = IntentBuilder("ChangeIntent"). \
            require("ChangeKeyword").build()
        self.register_intent(change_intent, self.handle_change_intent)

    def _tune_to(self, nr, message):
        """Start playback of station *nr* and remember it in POSITION.

        Replaces five copy-paste handler bodies; also fixes the original
        fallback branch of the change handler, which never updated POSITION.
        """
        global POSITION
        if self.audioservice:
            self.audioservice.play(URLS[nr], message.data['utterance'])
        else:
            self.process = play_mp3(URLS[nr])
        POSITION = nr

    def handle_random_intent(self, message):
        nr = random.randint(0, 3)
        self.speak_dialog("currently", {"station": NAME[nr]})
        time.sleep(2)
        self._tune_to(nr, message)

    def handle_dlf_intent(self, message):
        self._tune_to(0, message)

    def handle_dradio_intent(self, message):
        self._tune_to(1, message)

    def handle_nova_intent(self, message):
        self._tune_to(2, message)

    def handle_energyhh_intent(self, message):
        self._tune_to(3, message)

    def handle_change_intent(self, message):
        """Switch to the next station, wrapping from the last back to 0."""
        # BUG FIX: compute the wrapped successor *before* announcing it; the
        # original spoke NAME[POSITION + 1] unconditionally and raised
        # IndexError when the last station (3) was playing.
        nxt = POSITION + 1 if POSITION < 3 else 0
        self.speak_dialog("currently", {"station": NAME[nxt]})
        time.sleep(2)
        self._tune_to(nxt, message)

    def stop(self):
        pass
def initialize(self):
    """Attach the audio service and register the news intent."""
    self.audio = AudioService(self.emitter)
    news_intent = (IntentBuilder("NewsIntent")
                   .require("NewsKeyword")
                   .optionally("NewsSource")
                   .build())
    self.register_intent(news_intent, self.handle_intent)
class QuranSkill(MycroftSkill):
    """Recite Quran surahs (audio from api.alquran.cloud) and speak their
    tafseer (interpretation)."""

    def __init__(self):
        super(QuranSkill, self).__init__(name="QuranSkill")
        self.process = None

    def initialize(self):
        self.audioservice = AudioService(self.bus)

    def _pick_surah(self, article):
        """Map a spoken surah name to its 1-based number (as a string);
        pick a random one when the name is missing or unknown.

        NOTE(review): the original used random.choice(range(1, 114)), which
        could never pick surah 114 — treated here as an off-by-one and
        widened to randint(1, 114); confirm intent.
        """
        if article is None:
            return str(random.randint(1, 114))
        try:
            return str(utils.surahs.index(article) + 1)
        except ValueError:
            return str(random.randint(1, 114))

    @intent_file_handler('surah.intent')
    def handle_surah_intent(self, message):
        """Play a surah, optionally by a specific reader."""
        article = message.data.get('surah')
        readerName = message.data.get('reader')
        if readerName is None:
            reader = random.choice(utils.readers)
        else:
            try:
                reader = utils.readers[utils.readers_ar.index(readerName)]
            except ValueError:
                reader = random.choice(utils.readers)
        surah = self._pick_surah(article)
        # Renamed the original local `json`, which shadowed the json module.
        if readerName != "متنوع":
            data = utils.json_from_url(
                "http://api.alquran.cloud/v1/surah/" + surah + "/" + reader)
            path = utils.parse_surah(data)
        else:
            # "Varied" reader: fetch every reader and take each ayah from a
            # randomly chosen one.
            paths = []
            for rdr in utils.readers:
                data = utils.json_from_url(
                    "http://api.alquran.cloud/v1/surah/" + surah + "/" + rdr)
                paths.append(utils.parse_surah(data))
            # BUG FIX: random.choice(range(0, 9)) hard-coded nine readers;
            # index by however many were actually fetched.
            path = [paths[random.randrange(len(paths))][ii]
                    for ii in range(len(paths[0]))]
        try:
            self.audioservice.play(path)
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    @intent_file_handler('tafseer.intent')
    def handle_tafseer_intent(self, message):
        """Speak the tafseer (al-Jalalayn edition) of a surah."""
        article = message.data.get('surah')
        surah = self._pick_surah(article)
        # Audio fetch kept from the original even though playback of it is
        # currently disabled (parse_surah may cache files as a side effect).
        data = utils.json_from_url(
            "http://api.alquran.cloud/v1/surah/" + surah + "/ar.alafasy")
        path_surah = utils.parse_surah(data)  # noqa: F841
        data = utils.json_from_url(
            "http://api.alquran.cloud/v1/surah/" + surah +
            "/editions/ar.jalalayn")
        path_tafseer = utils.parse_tafseer(data)
        try:
            for ayah_text in path_tafseer:
                self.speak(ayah_text)
                wait_while_speaking()
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    def stop(self):
        if self.process and self.process.poll() is None:
            self.speak_dialog('quran.stop')
            self.process.terminate()
            self.process.wait()
class AnimalSounds(MycroftSkill):
    """Answer 'what does the X say' with either a spoken onomatopoeia or a
    random recording from the Sounds directory."""

    def __init__(self):
        super(AnimalSounds, self).__init__(name="AnimalSounds")
        self.process = None

    def initialize(self):
        self.audioservice = AudioService(self.bus)

    @intent_file_handler('sounds.animal.intent')
    def handle_sounds_animal(self, message):
        unknown_animal = [
            "I don't know that animal", "I can't make that sound",
            "Please give me an easier animal", "You tell me"
        ]
        # animal -> onomatopoeic text spoken when no recording is used.
        sounds = {
            'baby': 'wha wha', 'bird': 'tweet tweet', 'cat': 'meow',
            'cow': 'mooo', 'dog': 'woof woof', 'duck': 'qwack qwack',
            'frog': 'ribbit', 'hen': 'cluck cluck cluck', 'horse': 'neigh',
            'lamb': 'baa baaa', 'lion': 'roar', 'owl': 'who who',
            'rooster': 'c**k a doodle do', 'wolf': 'howl'
        }
        # spoken synonym -> canonical animal (dict lookup replaces the
        # original linear scan over tuples).
        synonyms = {
            'child': 'baby', 'gull': 'bird', 'kitten': 'cat', 'calf': 'cow',
            'puppy': 'dog', 'tashie': 'dog', 'duck': 'duck', 'toad': 'frog',
            'chicken': 'hen', 'pony': 'horse', 'sheep': 'lamb',
            'goat': 'lamb', 'lion': 'lion', 'owl': 'owl',
            'rooster': 'rooster', 'coyote': 'wolf'
        }
        animal_chosen = message.data.get('animal')
        # BUG FIX: 'animal' may be absent, giving None; the original only
        # compared against "" and then crashed concatenating None into the
        # debug log strings below.
        if not animal_chosen:
            self.speak_dialog("no animal specified")
            return
        animal_voice = synonyms.get(animal_chosen, animal_chosen)
        # 0 => speak the sound; 1-3 => play recording N; 4 => jeopardy gag
        # for unknown animals.
        rndm_num = random.randint(0, 4)
        self.log.debug('animal_chosen is ' + animal_chosen)
        self.log.debug('AnimalVoice is ' + animal_voice)
        self.log.debug('RndmNum is ' + str(rndm_num))
        if animal_voice not in sounds:
            # Unknown animal.
            if rndm_num == 4:
                # 1-in-5 chance: play the Jeopardy jingle instead.
                path = join(dirname(__file__), "Sounds",
                            "JeopardySongShort.mp3")
                if pathlib.Path(path).exists():
                    self.audioservice.play(path)
                else:
                    self.log.error('Path = ' + path)
                    self.speak_dialog(
                        "I seem to be lost. check the error log")
            else:
                # Say something witty.
                self.speak_dialog(unknown_animal[rndm_num])
        elif rndm_num == 0:
            # Speak the animal sound.
            self.speak_dialog(animal_chosen + ' says ')
            wait_while_speaking()
            self.speak_dialog(sounds[animal_voice])
        else:
            # Play recording <animal><N>.mp3 of the animal sound.
            path = join(dirname(__file__), "Sounds",
                        animal_voice + str(rndm_num) + '.mp3')
            if pathlib.Path(path).exists():
                self.speak_dialog(animal_chosen + ' says ')
                wait_while_speaking()
                self.audioservice.play(path)
            else:
                self.log.error('Path = ' + path)
                self.speak_dialog("I seem to be lost. check the error log")

    def stop(self):
        if self.process and self.process.poll() is None:
            self.speak_dialog('singing.stop')
            self.process.terminate()
            self.process.wait()
class GimletPodcastSkill(MycroftSkill):
    """Play the latest episode of a Gimlet Media podcast from its RSS feed."""

    # Utterance phrase -> rss_urls key, checked in the original's order.
    SHOW_KEYWORDS = [
        ("reply-all", "reply-all"), ("startup", "startup"),
        ("every little thing", "elt"), ("crimetown", "crimetown"),
        ("heavyweight", "heavyweight"), ("homecoming", "homecoming"),
        ("mogul", "mogul"), ("sampler", "sampler"), ("science", "svs"),
        ("nod", "nod"), ("pitch", "pitch"), ("twice removed", "tremoved"),
        ("uncivil", "uncivil"), ("undone", "undone"),
    ]

    def __init__(self):
        super(GimletPodcastSkill, self).__init__(name="GimletPodcastSkill")
        # List of all of the currently available rss urls from gimlet.
        self.rss_urls = {
            'reply-all': 'http://feeds.gimletmedia.com/hearreplyall',
            'startup': 'http://feeds.gimletmedia.com/hearstartup',
            'elt': 'http://feeds.gimletmedia.com/eltshow',
            'crimetown': 'http://feeds.gimletmedia.com/crimetownshow',
            'heavyweight': 'http://feeds.gimletmedia.com/heavyweightpodcast',
            'homecoming': 'http://feeds.gimletmedia.com/homecomingshow',
            'mogul': 'http://feeds.gimletmedia.com/mogulshow',
            'sampler': 'http://feeds.gimletmedia.com/samplershow',
            'svs': 'http://feeds.gimletmedia.com/sciencevs',
            'nod': 'http://feeds.gimletmedia.com/thenodshow',
            'pitch': 'http://feeds.gimletmedia.com/thepitchshow',
            'tremoved': 'http://feeds.gimletmedia.com/twiceremovedshow',
            'uncivil': 'http://feeds.gimletmedia.com/uncivil',
            'undone': 'http://feeds.gimletmedia.com/undoneshow'
        }
        self.process = None
        self.audioservice = None
        self.listen_url = ""

    def initialize(self):
        play_podcast_intent = IntentBuilder("PlayPodcastIntent").require(
            "PlayPodcastKeyword").build()
        self.register_intent(play_podcast_intent,
                             self.handle_play_podcast_intent)
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_play_podcast_intent(self, message):
        """Pick the show named in the utterance and play its newest episode."""
        utter = message.data['utterance']
        # Ordered keyword table replaces the original 14-branch elif chain.
        for phrase, key in self.SHOW_KEYWORDS:
            if phrase in utter:
                self.listen_url = self.rss_urls[key]
                break
        self.speak_dialog('latest')
        time.sleep(3)
        data = feedparser.parse(self.listen_url)
        # ROBUSTNESS: with an unrecognized show listen_url stays stale (or
        # empty) and the original raised IndexError here; log and bail out.
        if not data.get('entries'):
            self.log.error('No episodes found for ' + repr(self.listen_url))
            return
        url = data['entries'][0]['links'][0]['href']
        # If the audio service module is available, use it.
        if self.audioservice:
            self.audioservice.play(url, message.data['utterance'])
            self.enclosure.mouth_text(data['entries'][0]['title'])

    def stop(self):
        pass
def initialize(self):
    """Register all internet-radio intents and attach the audio service.

    The original registered two dozen intents with near-identical
    boilerplate; a declarative table keeps name, keywords and handler
    together and preserves the registration order. (Within an Adapt
    builder the require/optionally call order is not significant.)
    """
    # (intent name, required keywords, optional keywords, handler)
    intent_table = [
        ("InternetRadioIntent", ["InternetRadioKeyword"], [],
         self.handle_intent),
        ("HarkIntent", ["HarkKeyword", "RadioSearch"], [],
         self.handle_hark_intent),
        ("CountryRadioIntent", ["CountryRadioKeyword"], [],
         self.handle_country_intent),
        ("RockRadioIntent", ["RockRadioKeyword"], [],
         self.handle_rock_intent),
        ("ClassicalRadioIntent", ["ClassicalRadioKeyword"], [],
         self.handle_classical_intent),
        ("Top40RadioIntent", ["Top40RadioKeyword"], [],
         self.handle_top40_intent),
        ("JazzRadioIntent", ["JazzRadioKeyword"], [],
         self.handle_jazz_intent),
        ("ChristmasRadioIntent", ["ChristmasRadioKeyword"], [],
         self.handle_christmas_intent),
        ("FavoriteRadioIntent", ["FavoriteRadioKeyword"], [],
         self.handle_favorite_intent),
        ("ChildrensRadioIntent", ["ChildrensRadioKeyword"], [],
         self.handle_childrens_intent),
        ("InternetRadioStopIntent",
         ["InternetRadioStopVerb", "InternetRadioKeyword"], [],
         self.handle_stop),
        ("DarkPsyRadioIntent", ["DarkKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_dark_psy_intent),
        ("DarkProgressivePsyRadioIntent",
         ["DarkKeyword", "ProgressiveKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_dark_prog_psy_intent),
        ("ProgressivePsyRadioIntent",
         ["ProgressiveKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_prog_psy_intent),
        ("FullonPsyRadioIntent", ["FullOnKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_fullon_intent),
        ("GoaPsyRadioIntent", ["GoaKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_goa_intent),
        ("ForestPsyRadioIntent", ["ForestKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_forest_intent),
        ("SuomiPsyRadioIntent", ["SuomiKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_suomi_intent),
        ("HitechCorePsyRadioIntent", ["HiTechKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_hitech_psy_intent),
        ("OrochillPsyRadioIntent", ["OrochillKeyword", "PsytubeKeyword"],
         ["InternetRadioKeyword"], self.handle_orochill_intent),
        ("TechnoRadioIntent",
         ["TechnoRadioKeyword", "InternetRadioKeyword"],
         ["PsytubeKeyword"], self.handle_techno_intent),
        ("MinimalTechnoRadioIntent",
         ["MinimalTechnoRadioKeyword", "InternetRadioKeyword"],
         ["PsytubeKeyword"], self.handle_minimal_techno_intent),
        ("DNBRadioIntent",
         ["DrumNBassRadioKeyword", "InternetRadioKeyword"],
         ["PsytubeKeyword"], self.handle_dnb_intent),
        ("PsytubeInternetRadioIntent",
         ["InternetRadioKeyword", "PsytubeKeyword"], [],
         self.handle_psytube_intent),
    ]
    for name, required, optional, handler in intent_table:
        builder = IntentBuilder(name)
        for keyword in required:
            builder.require(keyword)
        for keyword in optional:
            builder.optionally(keyword)
        self.register_intent(builder.build(), handler)
    if AudioService:
        self.audioservice = AudioService(self.emitter)
class InternetRadioSkill(MycroftSkill): def __init__(self): super(InternetRadioSkill, self).__init__(name="InternetRadioSkill") self.audioservice = None self.process = None def initialize(self): intent = IntentBuilder("InternetRadioIntent").require( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_intent) intent = IntentBuilder("HarkIntent").require("HarkKeyword").require( "RadioSearch").build() self.register_intent(intent, self.handle_hark_intent) intent = IntentBuilder("CountryRadioIntent").require( "CountryRadioKeyword").build() self.register_intent(intent, self.handle_country_intent) intent = IntentBuilder("RockRadioIntent").require( "RockRadioKeyword").build() self.register_intent(intent, self.handle_rock_intent) intent = IntentBuilder("ClassicalRadioIntent").require( "ClassicalRadioKeyword").build() self.register_intent(intent, self.handle_classical_intent) intent = IntentBuilder("Top40RadioIntent").require( "Top40RadioKeyword").build() self.register_intent(intent, self.handle_top40_intent) intent = IntentBuilder("JazzRadioIntent").require( "JazzRadioKeyword").build() self.register_intent(intent, self.handle_jazz_intent) intent = IntentBuilder("ChristmasRadioIntent").require( "ChristmasRadioKeyword").build() self.register_intent(intent, self.handle_christmas_intent) intent = IntentBuilder("FavoriteRadioIntent").require( "FavoriteRadioKeyword").build() self.register_intent(intent, self.handle_favorite_intent) intent = IntentBuilder("ChildrensRadioIntent").require( "ChildrensRadioKeyword").build() self.register_intent(intent, self.handle_childrens_intent) intent = IntentBuilder("InternetRadioStopIntent") \ .require("InternetRadioStopVerb") \ .require("InternetRadioKeyword").build() self.register_intent(intent, self.handle_stop) intent = IntentBuilder("DarkPsyRadioIntent").require( "DarkKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_dark_psy_intent) intent = 
IntentBuilder("DarkProgressivePsyRadioIntent").require( "DarkKeyword").require("ProgressiveKeyword").require( "PsytubeKeyword").optionally("InternetRadioKeyword").build() self.register_intent(intent, self.handle_dark_prog_psy_intent) intent = IntentBuilder("ProgressivePsyRadioIntent").require( "ProgressiveKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_prog_psy_intent) intent = IntentBuilder("FullonPsyRadioIntent").require( "FullOnKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_fullon_intent) intent = IntentBuilder("GoaPsyRadioIntent").require( "GoaKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_goa_intent) intent = IntentBuilder("ForestPsyRadioIntent").require( "ForestKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_forest_intent) intent = IntentBuilder("SuomiPsyRadioIntent").require( "SuomiKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_suomi_intent) intent = IntentBuilder("HitechCorePsyRadioIntent").require( "HiTechKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_hitech_psy_intent) intent = IntentBuilder("OrochillPsyRadioIntent").require( "OrochillKeyword").require("PsytubeKeyword").optionally( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_orochill_intent) intent = IntentBuilder("TechnoRadioIntent").require( "TechnoRadioKeyword").optionally("PsytubeKeyword").require( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_techno_intent) intent = IntentBuilder("MinimalTechnoRadioIntent").require( "MinimalTechnoRadioKeyword").optionally("PsytubeKeyword").require( 
"InternetRadioKeyword").build() self.register_intent(intent, self.handle_minimal_techno_intent) intent = IntentBuilder("DNBRadioIntent").require( "DrumNBassRadioKeyword").optionally("PsytubeKeyword").require( "InternetRadioKeyword").build() self.register_intent(intent, self.handle_dnb_intent) intent = IntentBuilder("PsytubeInternetRadioIntent").require( "InternetRadioKeyword").require("PsytubeKeyword").build() self.register_intent(intent, self.handle_psytube_intent) if AudioService: self.audioservice = AudioService(self.emitter) def handle_psytube_intent(self, message): self.stop() self.speak_dialog('psytube') time.sleep(4) urls = [ "dark_psy_trance_station_url", "progressive_psy_trance_station_url", "dark_progressive_psy_trance_station_url", "forest_psy_trance_station_url", "suomi_psy_trance_station_url", "fullon_psy_trance_station_url", "goa_psy_trance_station_url" ] if self.audioservice: self.audioservice.play(random.choice(urls)) else: # othervice use normal mp3 playback self.process = play_mp3(random.choice(urls)) def handle_hitech_psy_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['hitech_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['hitech_psy_trance_station_url']) def handle_dark_psy_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['dark_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['dark_psy_trance_station_url']) def handle_dark_prog_psy_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['dark_progressive_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['dark_progressive_psy_trance_station_url']) 
def handle_prog_psy_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['progressive_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['progressive_psy_trance_station_url']) def handle_fullon_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['fullon_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['fullon_psy_trance_station_url']) def handle_goa_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['goa_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['goa_psy_trance_station_url']) def handle_forest_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['forest_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['forest_psy_trance_station_url']) def handle_suomi_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play( self.settings['suomi_psy_trance_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['suomi_psy_trance_station_url']) def handle_orochill_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['orochill_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['orochill_station_url']) def handle_techno_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: 
self.audioservice.play(self.settings['techno_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['techno_station_url']) def handle_minimal_techno_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['minimal_techno_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3( self.settings['minimal_techno_station_url']) def handle_dnb_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['drumnbass_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['drumnbass_station_url']) def handle_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) search_string = message.data.get('RadioSearch') if self.audioservice: self.audioservice.play(self.settings['station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['station_url']) def handle_hark_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) search_string = message.data.get('RadioSearch') s_details = requests.get('http://greatesthits.rocks:5000/station') stations = s_details.json() stream_url2 = 'none' for station in stations: if (search_string).lower() == station['name'].lower(): stream_url = station['url'] stream_url2 = stream_url.encode('utf-8') self.audioservice.play(stream_url2) break def handle_country_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['country_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['country_station_url']) def handle_rock_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: 
self.audioservice.play(self.settings['rock_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['rock_station_url']) def handle_classical_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['classical_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['classical_station_url']) def handle_top40_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['top40_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['top40_station_url']) def handle_jazz_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['jazz_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['jazz_station_url']) def handle_christmas_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['christmas_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['christmas_station_url']) def handle_favorite_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['favorite_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['favorite_station_url']) def handle_childrens_intent(self, message): self.stop() self.speak_dialog('internet.radio') time.sleep(4) if self.audioservice: self.audioservice.play(self.settings['childrens_station_url']) else: # othervice use normal mp3 playback self.process = play_mp3(self.settings['childrens_station_url']) def handle_stop(self, message): self.stop() self.speak_dialog('internet.radio.stop') def 
stop(self): if self.audioservice: self.audioservice.stop() else: if self.process and self.process.poll() is None: self.process.terminate() self.process.wait()
class CommonPlaySkill(MycroftSkill, ABC):
    """Base class for skills integrating with the common play framework.

    To integrate with the common play infrastructure of Mycroft, skills
    should use this base class and override the two methods
    `CPS_match_query_phrase` (for checking if the skill can play the
    utterance) and `CPS_start` (for launching the media).

    The class makes the skill available to queries from the
    mycroft-playback-control skill and no special vocab for starting
    playback is needed.
    """
    def __init__(self, name=None, bus=None):
        # audioservice is attached lazily in bind(); until then it is None.
        super().__init__(name, bus)
        self.audioservice = None
        # Last "play ..." phrase, saved for CPS_play()'s utterance kwarg.
        self.play_service_string = None

        # "MusicServiceSkill" -> "Music Service"
        spoken = name or self.__class__.__name__
        self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
                                  spoken.replace("Skill", ""))
        # NOTE: Derived skills will likely want to override self.spoken_name
        # with a translatable name in their initialize() method.

    def bind(self, bus):
        """Overrides the normal bind method.

        Adds handlers for play:query and play:start messages allowing
        interaction with the playback control skill.

        This is called automatically during setup, and
        need not otherwise be used.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a play:query from playback-control with match confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(message.response({"phrase": search_phrase,
                                        "skill_id": self.skill_id,
                                        "searching": True}))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            # result is (match, level[, callback_data])
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "callback_data": callback,
                                            "service_name": self.spoken_name,
                                            "conf": confidence}))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "searching": False}))

    def __calc_confidence(self, match, phrase, level):
        """Map a CPSMatchLevel plus word coverage to a 0.0-1.0 confidence.

        # "play pandora"
        # "play pandora is my girlfriend"
        # "play tom waits on pandora"
        """
        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0 / consumed_pct  # deal with over/under-matching

        # We'll use this to modify the level, but don't want it to allow a
        # match to jump to the next match level.  So bonus is 0 - 0.05 (1/20)
        bonus = consumed_pct / 20.0
        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + bonus
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + bonus
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + bonus
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + bonus
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + bonus
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Bus handler: this skill was selected -- begin playback."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        """Stop the audioservice; return True if anything was playing."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """Analyze phrase to see if it is a play-able phrase with this skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass
    def bind(self, bus):
        """Overridden bind: also hook this skill into the common-play bus.

        Attaches an AudioService instance and registers handlers for the
        play:query and play:start messages so the playback control skill
        can interact with this skill. Called automatically during setup.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)
class CommonPlaySkill(MycroftSkill, ABC):
    """Base class making a skill answerable to "play ..." queries.

    Derived skills override `CPS_match_query_phrase` (can we play this?)
    and `CPS_start` (start the media); this class handles the play:query /
    play:start bus protocol with the playback control skill.
    """
    def __init__(self, name=None, bus=None):
        # audioservice is attached lazily in bind(); until then it is None.
        super().__init__(name, bus)
        self.audioservice = None
        # Last "play ..." phrase, saved for CPS_play()'s utterance kwarg.
        self.play_service_string = None

        # "MusicServiceSkill" -> "Music Service"
        spoken = name or self.__class__.__name__
        self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
                                  spoken.replace("Skill", ""))
        # NOTE: Derived skills will likely want to override self.spoken_name
        # with a translatable name in their initialize() method.

    def bind(self, bus):
        """Bind to the bus and register common-play event handlers.

        Called automatically during setup; need not otherwise be used.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a play:query from playback-control with match confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(
            message.response({
                "phrase": search_phrase,
                "skill_id": self.skill_id,
                "searching": True
            }))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            # result is (match, level[, callback_data])
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(
                message.response({
                    "phrase": search_phrase,
                    "skill_id": self.skill_id,
                    "callback_data": callback,
                    "service_name": self.spoken_name,
                    "conf": confidence
                }))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(
                message.response({
                    "phrase": search_phrase,
                    "skill_id": self.skill_id,
                    "searching": False
                }))

    def __calc_confidence(self, match, phrase, level):
        """Map a CPSMatchLevel plus word coverage to a 0.0-1.0 confidence."""
        # "play pandora"
        # "play pandora is my girlfriend"
        # "play tom waits on pandora"
        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0 / consumed_pct  # deal with over/under-matching

        # We'll use this to modify the level, but don't want it to allow a
        # match to jump to the next match level.  So bonus is 0 - 0.05 (1/20)
        bonus = consumed_pct / 20.0
        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + bonus
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + bonus
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + bonus
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + bonus
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + bonus
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Bus handler: this skill was selected -- begin playback."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """ Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        """Stop the audioservice; return True if anything was playing."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """ Analyze phrase to see if it is a play-able phrase with this
        skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """ Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass
class CommonPlaySkill(NeonSkill, ABC):
    """ To integrate with the common play infrastructure of Mycroft
    skills should use this base class and override the two methods
    `CPS_match_query_phrase` (for checking if the skill can play the
    utterance) and `CPS_start` for launching the media.

    The class makes the skill available to queries from the
    mycroft-playback-control skill and no special vocab for starting
    playback is needed.
    """
    def __init__(self, name=None, bus=None):
        # audioservice is attached lazily in bind(); until then it is None.
        super().__init__(name, bus)
        self.audioservice = None
        # Last "play ..." phrase, saved for CPS_play()'s utterance kwarg.
        self.play_service_string = None

        # "MusicServiceSkill" -> "Music Service"
        spoken = name or self.__class__.__name__
        self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
                                  spoken.replace("Skill", ""))
        # NOTE: Derived skills will likely want to override self.spoken_name
        # with a translatable name in their initialize() method.

    def bind(self, bus):
        """Overrides the normal bind method. Adds handlers for play:query
        and play:start messages allowing interaction with the playback
        control skill.

        This is called automatically during setup, and
        need not otherwise be used.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Query skill if it can start playback from given phrase."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(message.response({"phrase": search_phrase,
                                        "skill_id": self.skill_id,
                                        "searching": True}))

        # Now invoke the CPS handler to let the skill perform its search
        # (this variant also forwards the originating Message object)
        result = self.CPS_match_query_phrase(search_phrase, message)

        if result:
            # result is (match, level[, callback_data])
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "callback_data": callback,
                                            "service_name": self.spoken_name,
                                            "conf": confidence}))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "searching": False}))

    def __calc_confidence(self, match, phrase, level):
        """Translate confidence level and match to a 0-1 value.

        "play pandora"
        "play pandora is my girlfriend"
        "play tom waits on pandora"

        Assume the more of the words that get consumed, the better the match

        Arguments:
            match (str): Matching string
            phrase (str): original input phrase
            level (CPSMatchLevel): match level
        """
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0 / consumed_pct  # deal with over/under-matching

        # We'll use this to modify the level, but don't want it to allow a
        # match to jump to the next match level.  So bonus is 0 - 0.05 (1/20)
        bonus = consumed_pct / 20.0
        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + bonus
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + bonus
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + bonus
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + bonus
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + bonus
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Bus handler for starting playback using the skill."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(message.forward("mycroft.stop"))
        # TODO: maybe do this with some param to not clear gui in CP skills DM

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data, message)

    def CPS_play(self, *args, **kwargs):
        """Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        # TODO: Enable WW, set sink name, set signal to handle WW on stop (like AVMusic) DM
        # Move gui display to here from Playback-Control skill? DM
        self.audioservice.play(*args, **kwargs)
        # NOTE(review): args[0] raises IndexError if CPS_play is called with
        # keyword arguments only -- confirm callers always pass the uri
        # positionally.
        self.CPS_send_status(uri=args[0],
                             status=CPSTrackStatus.PLAYING_AUDIOSERVICE)

    def stop(self):
        """Stop anything playing on the audioservice."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase, message):
        """Analyze phrase to see if it is a play-able phrase with this skill.

        Arguments:
            phrase (str): User phrase uttered after "Play", e.g. "some music"
            message (Message): Message associated with request

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data, message=None):
        """Begin playing whatever is specified in 'phrase'

        Arguments:
            phrase (str): User phrase uttered after "Play", e.g. "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass

    def CPS_extend_timeout(self, timeout=5):
        """Request Common Play Framework to wait another {timeout} seconds
        for an answer from this skill.

        Arguments:
            timeout (int): Number of seconds
        """
        self.bus.emit(Message('play:query.response',
                              {"phrase": self.play_service_string,
                               "searching": True,
                               "timeout": timeout,
                               "skill_id": self.skill_id}))

    def CPS_send_status(self, artist='', track='', album='', image='',
                        uri='', track_length=None, elapsed_time=None,
                        playlist_position=None,
                        status=CPSTrackStatus.DISAMBIGUATION, **kwargs):
        """Inform system of playback status.

        If a skill is handling playback and wants the playback control to be
        aware of its current status it can emit this message indicating that
        it's performing playback and can provide some standard info.

        All parameters are optional so any can be left out. Also if extra
        non-standard parameters are added, they too will be sent in the
        message data.

        Arguments:
            artist (str): Current track artist
            track (str): Track name
            album (str): Album title
            image (str): url for image to show
            uri (str): uri for track
            track_length (float): track length in seconds
            elapsed_time (float): current offset into track in seconds
            playlist_position (int): Position in playlist of current track
        """
        data = {'skill': self.name,
                'uri': uri,
                'artist': artist,
                'album': album,
                'track': track,
                'image': image,
                'track_length': track_length,
                'elapsed_time': elapsed_time,
                'playlist_position': playlist_position,
                'status': status
                }
        data = {**data, **kwargs}  # Merge extra arguments
        self.bus.emit(Message('play:status', data))

    def CPS_send_tracklist(self, tracklist):
        """Inform system of playlist track info.

        Provides track data for playlist

        Arguments:
            tracklist (list/dict): Tracklist data
        """
        # Normalize: None -> empty list, single track dict -> one-item list.
        tracklist = tracklist or []
        if not isinstance(tracklist, list):
            tracklist = [tracklist]
        for idx, track in enumerate(tracklist):
            # NOTE(review): assumes each entry is a dict of CPS_send_status
            # keyword arguments -- confirm against callers.
            self.CPS_send_status(playlist_position=idx, **track)
class USBMusicSkill(CommonPlaySkill):
    """Indexes and plays music from USB sticks, SMB shares and a local folder.

    A background thread polls for USB insertion/removal; on insertion the
    device is mounted, scanned into ``self.song_list`` and (optionally)
    auto-played.  Library entries are dicts with keys: location, label,
    album, artist, source ('usb' | 'smb' | 'local').
    """

    def __init__(self):
        super(USBMusicSkill, self).__init__('USBMusicSkill')
        self.song_list = []  # library entries, see class docstring
        self.prev_status = False  # last observed USB-connected state
        self.song_artist = ""
        self.song_label = ""
        self.song_album = ""
        self.Auto_Play = False  # play everything as soon as a stick is inserted
        self.status = False  # current USB-connected state
        self.library_ready = False
        self.path = ""  # mount point currently being scanned
        self.local_path = ""
        self.smb_path = ""
        self.smb_uname = ""
        self.smb_pass = ""
        self.usb_monitor = NewThread  # holder for the monitor thread state
        self.usbdevice = usbdev
        self.observer = self.usbdevice.startListener()
        self.audio_service = None
        self.audio_state = 'stopped'  # 'playing', 'stopped'
        LOG.info("USB Music Skill Loaded!")

    def initialize(self):
        self.load_data_files(dirname(__file__))
        self.audio_service = AudioService(self.bus)
        LOG.info("USB Music Skill Initialized!")
        # restart the monitor thread so a reload picks up fresh state
        self.halt_usb_monitor_thread()
        self.init_usb_monitor_thread()
        self.settings_change_callback = self.on_websettings_changed
        self.on_websettings_changed()

    def on_websettings_changed(self):
        """Pull the current skill settings from the Mycroft home page."""
        self.Auto_Play = self.settings.get(
            "Auto_Play", False)  # used to enable / disable Auto_Play
        self.local_path = self.settings.get("local_path", "~/Music")
        self.smb_path = self.settings.get("smb_path",
                                          "//192.168.0.20/SMBMusic")
        self.smb_uname = self.settings.get("smb_uname", "guest")
        self.smb_pass = self.settings.get("smb_pass", "")
        LOG.info('USB-Music Settings Changed, AutoPlay now: ' +
                 str(self.Auto_Play))
        LOG.info('USB-Music Settings Changed, SMB Path now: ' +
                 str(self.smb_path))

    def init_usb_monitor_thread(self):
        """Start the USB polling thread (see start_usb_thread)."""
        self.usb_monitor.idStop = False
        self.usb_monitor.id = 101
        self.usb_monitor.idThread = threading.Thread(
            target=self.start_usb_thread,
            args=(self.usb_monitor.id, lambda: self.usb_monitor.idStop))
        self.usb_monitor.idThread.start()

    def halt_usb_monitor_thread(self):
        """Signal the USB polling thread to stop and wait for it."""
        try:
            self.usb_monitor.id = 101
            self.usb_monitor.idStop = True
            self.usb_monitor.idThread.join()
        except Exception as e:
            # Thread may never have been started (first initialize()).
            LOG.error(e)

    def numeric_replace(self, in_words=""):
        """Replace number words ("two") with digits ("2"), word by word."""
        word_list = in_words.split()
        return_list = []
        for each_word in word_list:
            try:
                new_word = w2n.word_to_num(each_word)
            except Exception:
                # not a number word - keep it as-is
                new_word = each_word
            return_list.append(new_word)
        return ' '.join(str(e) for e in return_list)

    def parse_music_utterance(self, phrase):
        # Todo: move Regex to file for language support
        """Extract the requested item and its type from the spoken phrase.

        Returns:
            tuple (item(str), type(str)): type is one of
            'label', 'artist', 'album', 'any' or 'none'.
        """
        return_type = "any"
        str_request = str(phrase)
        LOG.info("Parse Music Received: " + str_request)
        primary_regex = r"((?<=album) (?P<album>.*$))|((?<=artist) (?P<artist>.*$))|((?<=song) (?P<label>.*$))"
        if str_request.find('some') != -1:
            secondary_regex = r"((?<=some) (?P<any>.*$))"
        else:
            secondary_regex = r"((?<=play) (?P<any>.*$))"
        key_found = re.search(primary_regex, str_request)
        if key_found:
            LOG.info("Primary Regex Key Found")
            if key_found.group("label"):
                LOG.info("found label")
                return_item = key_found.group("label")
                return_type = "label"
            elif key_found.group("artist"):
                LOG.info("found artist")
                return_item = key_found.group("artist")
                return_type = "artist"
            elif key_found.group("album"):
                LOG.info("found album")
                return_item = key_found.group("album")
                return_type = "album"
        else:
            LOG.info("Primary Regex Key Not Found")
            key_found = re.search(secondary_regex, str_request)
            if key_found.group("any"):
                LOG.info("Secondary Regex Key Found")
                return_item = key_found.group("any")
                return_type = "any"
            else:
                LOG.info("Secondary Regex Key Not Found")
                return_item = "none"
                return_type = "none"
        # Returns the item that was requested and the type of the requested
        # item ie. artist, album, label
        return return_item, return_type

    def search_music_library(self, search_string, category="any"):
        """Search the library; for 'any' try label, then artist, then album.

        Returns a list of matching entries, or None when nothing matched.
        """
        found_list = []
        LOG.info("searching the music library for: " + search_string + ", " +
                 category)
        if category == "any":
            found_list = self.search_music_item(search_string,
                                                category="label")
            if len(found_list) > 0:
                return found_list
            LOG.info("Label: " + search_string + ", Not Found!")
            found_list = self.search_music_item(search_string,
                                                category="artist")
            if len(found_list) > 0:
                return found_list
            LOG.info("Artist: " + search_string + ", Not Found!")
            found_list = self.search_music_item(search_string,
                                                category="album")
            if len(found_list) == 0:
                LOG.info("Album: " + search_string + ", Not Found!")
                return
        else:
            found_list = self.search_music_item(search_string,
                                                category=str(category))
        if len(found_list) > 0:
            return found_list

    def search_music_item(self, search_item, category="label"):
        """Return library entries whose ``category`` field contains every
        word of ``search_item`` (number words normalized to digits).

        category options: label, artist, album
        """
        search_item = self.numeric_replace(search_item)
        found_list = []  # all the items found that match the search
        search_words = search_item.replace("-", "").lower().split()
        # check each song in the list for strings that match all the words
        for each_song in self.song_list:
            item_name = each_song[category].replace("-", "")
            if len(item_name) > 0:
                item_name = self.numeric_replace(item_name)
                if all(words in item_name.lower() for words in search_words):
                    info = {
                        "location": each_song['location'],
                        "label": each_song['label'],
                        "album": each_song['album'],
                        "artist": each_song['artist'],
                        "source": each_song['source']
                    }
                    found_list.append(info)
        LOG.info('Found the following songs: ' + str(found_list))
        # remove duplicates (same label appearing more than once)
        temp_list = []
        for each_song in found_list:
            info = {
                "location": each_song['location'],
                "label": each_song['label'],
                "album": each_song['album'],
                "artist": each_song['artist'],
                "source": each_song['source']
            }
            # Todo this is missing in the kodi skill????
            song_title = str(each_song['label'])
            if song_title not in str(temp_list):
                temp_list.append(info)
            else:
                if len(each_song['label']) == len(song_title):
                    LOG.info('found duplicate')
                else:
                    temp_list.append(info)
        return temp_list  # returns a list of matched songs

    def merge_library(self, dict1, dict2):
        """Concatenate two library lists."""
        return dict1 + dict2

    def start_usb_thread(self, my_id, terminate):
        """Poll for USB insertion/removal until ``terminate()`` is True."""
        # Todo automatically play when stick is inserted
        LOG.info("USB Monitoring Loop Started!")
        while not terminate():
            # wait while this interval completes
            time.sleep(1)  # Todo make the polling time a variable
            self.status = self.usbdevice.isDeviceConnected()
            if self.status != self.prev_status:
                LOG.info("USB Status Changed!")
                self.prev_status = self.status
                if self.status:  # Device inserted
                    # remove any existing mount points
                    self.usbdevice.uMountPathUsbDevice()
                    LOG.info("Device Inserted!")
                    device = self.usbdevice.getDevData()
                    # mount the device and get the path
                    self.path = self.usbdevice.getMountPathUsbDevice()
                    LOG.info("Stat: " + str(self.status))
                    LOG.info("dev: " + str(device))
                    LOG.info("path: " + str(self.path))
                    LOG.info("---------------------------------")
                    self.speak_dialog('update.library',
                                      data={"source": str("usb")},
                                      expect_response=False)
                    wait_while_speaking()
                    # BUG FIX: entries are keyed 'source', not 'type'
                    self.song_list = [
                        i for i in self.song_list
                        if not (i['source'] == 'usb')
                    ]
                    self.song_list = self.merge_library(
                        self.song_list, self.create_library(self.path, "usb"))
                    if self.Auto_Play:
                        self.play_all(self.song_list)
                else:  # Device removed
                    self.audio_service.stop()
                    # unmount the path
                    self.usbdevice.uMountPathUsbDevice()
                    LOG.info("Device Removed!")
                    # Todo remove context "USB" so all play requests start
                    # with this skill
                    self.speak_dialog('usb.removed', expect_response=False)
                    wait_while_speaking()
                    self.song_list = []
                    self.path = ""
                    self.on_websettings_changed()
        self.usbdevice.stopListener(self.observer)

    def play_all(self, library):
        """Shuffle the whole library and start playback."""
        LOG.info('Automatically playing the USB Device')
        tracklist = []
        for each_song in library:
            LOG.info("CPS Now Playing... " + each_song['label'] +
                     " from location: " + each_song['location'])
            tracklist.append(each_song['location'])
        random.shuffle(tracklist)
        self.speak_dialog('now.playing')
        wait_while_speaking()
        self.audio_service.play(tracklist)
        self.audio_state = 'playing'

    def create_library(self, source_path, source_type="usb"):
        """Walk ``source_path`` and build library entries from ID3 tags.

        Falls back to the file name (minus extension) when no tag exists.
        """
        self.library_ready = False
        new_library = []
        for root, d_names, f_names in os.walk(str(source_path)):
            for fileName in f_names:
                # BUG FIX: the original tested ("mp3" or "flac"), which
                # always evaluates to "mp3" and silently skipped flac files.
                if any(ext in str(fileName) for ext in ("mp3", "flac")):
                    song_path = str(root) + "/" + str(fileName)
                    try:
                        audio = EasyID3(song_path)
                        if len(audio) > 0:  # An ID3 tag found
                            # BUG FIX: EasyID3 raises KeyError for missing
                            # frames; .get() returns None instead of falling
                            # into the blanket handler below.
                            title = audio.get("title")
                            self.song_label = (title[0] if title else
                                               str(fileName)[:-4])
                            artist = audio.get("artist")
                            self.song_artist = artist[0] if artist else ""
                            album = audio.get("album")
                            self.song_album = album[0] if album else ""
                        else:  # There was no ID3 Tag found
                            self.song_label = str(fileName)[:-4]
                            self.song_artist = ""
                            self.song_album = ""
                    except Exception:
                        # unreadable/corrupt tag - index by file name only
                        self.song_label = str(fileName)[:-4]
                        self.song_artist = ""
                        self.song_album = ""
                    info = {
                        "location": song_path,
                        "label": self.song_label,
                        "artist": self.song_artist,
                        "album": self.song_album,
                        "source": str(source_type)
                    }
                    new_library.append(info)
        song_count = len(new_library)
        self.speak_dialog('scan.complete',
                          data={
                              "count": str(song_count),
                              "source": str(source_type)
                          },
                          expect_response=False)
        wait_while_speaking()
        LOG.info("Added: " + str(song_count) + " to the library from the " +
                 str(source_type) + " Device")
        self.library_ready = True
        return new_library

    def CPS_match_query_phrase(self, phrase):
        """The method is invoked by the PlayBackControlSkill.

        Returns (phrase, CPSMatchLevel, data) when the library has a match,
        otherwise None.
        """
        LOG.info('USBMusicSkill received the following phrase: ' + phrase)
        if self.status or self.library_ready:  # Confirm the USB is inserted
            LOG.info("USBMusicSkill is Searching for requested media...")
            play_request = self.parse_music_utterance(phrase)
            LOG.info("USBMusicSkill Parse Routine Returned: " +
                     str(play_request))
            music_playlist = self.search_music_library(
                play_request[0], category=play_request[1])
            if music_playlist is None:
                return None  # until a match is found
            if len(music_playlist) > 0:
                LOG.info('Music found that matched the request!')
                return phrase, CPSMatchLevel.EXACT, music_playlist
            return None  # until a match is found
        LOG.info("Device or Library Not Ready, Passing on this request")
        return None

    def CPS_start(self, phrase, data):
        """Starts playback. Called by the playback control skill to start
        playback if the skill is selected (has the best match level)
        """
        tracklist = []
        LOG.info(
            'USBMusicSkill, Playback received the following phrase and Data: '
            + phrase + ' ' + str(data))
        for each_song in data:
            LOG.info("CPS Now Playing... " + each_song['label'] +
                     " from location: " + each_song['location'])
            tracklist.append(each_song['location'])
        self.speak_dialog('now.playing')
        wait_while_speaking()
        self.audio_service.play(tracklist)
        self.audio_state = 'playing'

    @intent_handler(
        IntentBuilder('').require("UpdateKeyword").require(
            "USBKeyword").require("LibraryKeyword"))
    def handle_update_usb_library_intent(self, message):
        """Re-scan the currently inserted USB device into the library."""
        LOG.info("Called Update Library Intent")
        if self.usbdevice.isDeviceConnected():
            device = self.usbdevice.getDevData()
            # mount the device and get the path
            self.path = self.usbdevice.getMountPathUsbDevice()
            self.speak_dialog(
                'update.library',
                data={"source": str(message.data.get("USBKeyword"))},
                expect_response=False)
            wait_while_speaking()
            # BUG FIX: entries are keyed 'source', not 'type'
            self.song_list = [
                i for i in self.song_list if not (i['source'] == 'usb')
            ]
            self.song_list = self.merge_library(
                self.song_list, self.create_library(self.path, "usb"))
        else:
            self.usbdevice.uMountPathUsbDevice()
            LOG.info("USB Device Not Detected")

    # Todo: Add an unmount / release command
    @intent_handler(
        IntentBuilder('').require("RemoveKeyword").require("USBKeyword"))
    def handle_remove_usb_intent(self, message):
        """Unmount the USB device on request."""
        self.usbdevice.uMountPathUsbDevice()
        LOG.info("Device Removed!")

    @intent_handler(
        IntentBuilder('').require("UpdateKeyword").require(
            "NetworkKeyword").require("LibraryKeyword"))
    def handle_get_smb_music_intent(self, message):
        """Mount the configured SMB share and scan it into the library."""
        self.path = self.usbdevice.MountSMBPath(self.smb_path, self.smb_uname,
                                                self.smb_pass)
        self.speak_dialog(
            'update.library',
            data={"source": str(message.data.get("NetworkKeyword"))},
            expect_response=False)
        wait_while_speaking()
        # BUG FIX: entries are keyed 'source', not 'type'
        self.song_list = [
            i for i in self.song_list if not (i['source'] == 'smb')
        ]
        self.song_list = self.merge_library(
            self.song_list, self.create_library(self.path, "smb"))
        LOG.info("SMB Mounted!")

    @intent_handler(
        IntentBuilder('').require("UpdateKeyword").require(
            "LocalKeyword").require("LibraryKeyword"))
    def handle_get_local_music_intent(self, message):
        """Scan the configured local music folder into the library."""
        self.path = self.local_path
        self.speak_dialog(
            'update.library',
            data={"source": str(message.data.get("LocalKeyword"))},
            expect_response=False)
        wait_while_speaking()
        # BUG FIX: entries are keyed 'source', not 'type'
        self.song_list = [
            i for i in self.song_list if not (i['source'] == 'local')
        ]
        self.song_list = self.merge_library(
            self.song_list, self.create_library(self.path, "local"))
        LOG.info("Local Mounted!")

    @intent_handler(
        IntentBuilder('').require("UpdateKeyword").require(
            "MusicKeyword").require("LibraryKeyword"))
    def handle_get_All_available_intent(self, message):
        """Scan SMB, local folder and (if present) USB, in that order."""
        self.path = self.usbdevice.MountSMBPath(self.smb_path, self.smb_uname,
                                                self.smb_pass)
        self.speak_dialog(
            'update.library',
            data={"source": str(message.data.get("MusicKeyword"))},
            expect_response=False)
        wait_while_speaking()
        # BUG FIX (here and below): entries are keyed 'source', not 'type'
        self.song_list = [
            i for i in self.song_list if not (i['source'] == 'smb')
        ]
        self.song_list = self.merge_library(
            self.song_list, self.create_library(self.path, "smb"))
        LOG.info("SMB Mounted!")
        self.path = self.local_path
        self.song_list = [
            i for i in self.song_list if not (i['source'] == 'local')
        ]
        self.song_list = self.merge_library(
            self.song_list, self.create_library(self.path, "local"))
        LOG.info("Local Mounted!")
        if self.usbdevice.isDeviceConnected():
            device = self.usbdevice.getDevData()
            # mount the device and get the path
            self.path = self.usbdevice.getMountPathUsbDevice()
            # NOTE(review): this intent only requires MusicKeyword, so
            # message.data.get("USBKeyword") is likely None here - confirm.
            self.speak_dialog(
                'update.library',
                data={"source": str(message.data.get("USBKeyword"))},
                expect_response=False)
            wait_while_speaking()
            self.song_list = [
                i for i in self.song_list if not (i['source'] == 'usb')
            ]
            self.song_list = self.merge_library(
                self.song_list, self.create_library(self.path, "usb"))

    @intent_handler(
        IntentBuilder('').require("StartKeyword").require(
            "USBKeyword").require('ScanKeyword'))
    def handle_start_usb_intent(self, message):
        """(Re)start the USB monitor thread if it is not already running."""
        # BUG FIX: Thread.isAlive() was removed in Python 3.9
        LOG.info('Thread Running: ' +
                 str(self.usb_monitor.idThread.is_alive()))
        if self.usb_monitor.idThread.is_alive():
            LOG.info("Scan is already running!")
        else:
            LOG.info("Scan should start!")
            self.init_usb_monitor_thread()

    @intent_handler(
        IntentBuilder('').require("ShowKeyword").require(
            "MusicKeyword").require('LibraryKeyword'))
    def handle_show_music_library_intent(self, message):
        """Dump the current library to the log."""
        LOG.info(str(self.song_list))
        LOG.info('Library Size: ' + str(len(self.song_list)))

    def stop(self):
        """Stop playback; return True if something was actually stopped."""
        if self.audio_state == 'playing':
            self.audio_service.stop()
            LOG.debug('Stopping stream')
            self.audio_state = 'stopped'
            return True
class PodcastSkill(MycroftSkill):
    """Plays episodes from up to three user-configured podcast RSS feeds."""

    def __init__(self):
        super(PodcastSkill, self).__init__(name="PodcastSkill")
        self.process = None  # mpg123 subprocess when no audio service exists
        self.audioservice = None

    def initialize(self):
        if AudioService:
            # NOTE(review): self.emitter is the legacy name for the message
            # bus (newer mycroft-core exposes self.bus) - confirm core version.
            self.audioservice = AudioService(self.emitter)

    def chosen_podcast(self, utter, podcast_names, podcast_urls):
        """Return the feed url whose name appears in ``utter``, else ""."""
        for index, name in enumerate(podcast_names):
            # skip if podcast slot left empty
            if not name:
                continue
            if name.lower() in utter.lower():
                listen_url = podcast_urls[index]
                break
        else:
            listen_url = ""
        return listen_url

    @intent_handler(IntentBuilder("PlayPodcastIntent").require("PlayPodcastKeyword"))
    def handle_play_podcast_intent(self, message):
        """Match a configured podcast, let the user pick an episode, play it."""
        utter = message.data['utterance']
        self.enclosure.mouth_think()
        podcast_names = [self.settings["nameone"],
                         self.settings["nametwo"],
                         self.settings["namethree"]]
        podcast_urls = [self.settings["feedone"],
                        self.settings["feedtwo"],
                        self.settings["feedthree"]]
        for try_count in range(0, 2):
            listen_url = self.chosen_podcast(utter, podcast_names,
                                             podcast_urls)
            if listen_url:
                break
            # BUG FIX: get_response() may return None when the user stays
            # silent; default to "" so the next .lower() doesn't crash.
            utter = self.get_response('nomatch') or ""
        else:
            self.speak_dialog('not.found')
            return False
        # normalise feed and parse it
        normalised_feed = pp.normalize_feed_url(listen_url)
        # NOTE(review): urllib.urlopen is the Python 2 API; on Python 3 this
        # would be urllib.request.urlopen - confirm interpreter version.
        parsed_feed = pp.parse(normalised_feed,
                               urllib.urlopen(normalised_feed))
        # Check what episode the user wants
        episode_index = 0
        # This block adds functionality for the user to choose an episode
        while (True):
            episode_title = parsed_feed['episodes'][episode_index]['title']
            podcast_title = parsed_feed['title']
            data_dict = {"podcast_title": podcast_title,
                         "episode_title": episode_title}
            if episode_index == 0:
                response = self.get_response('play.previous', data=data_dict,
                                             on_fail='please.repeat')
            else:
                response = self.get_response('play.next.previous',
                                             data=data_dict,
                                             on_fail='please.repeat')
            # error check
            if response is None:
                break
            if "stop" in response:
                self.speak("Operation cancelled.")
                return False
            elif "play" in response:
                break
            elif "previous" in response:
                # feeds list newest first, so "previous" moves up the index
                episode_index += 1
            elif "next" in response:
                # ensure index doesnt go below zero
                if episode_index != 0:
                    episode_index -= 1
        self.speak("Playing podcast.")
        wait_while_speaking()
        # try and parse the rss feed, some are incompatible
        try:
            episode = (parsed_feed["episodes"][episode_index]
                       ["enclosures"][0]["url"])
        except Exception:
            self.speak_dialog('badrss')
            # BUG FIX: the original fell through here and dereferenced the
            # undefined `episode`, raising NameError after the apology.
            return False
        # check for any redirects
        episode = urllib.urlopen(episode)
        redirected_episode = episode.geturl()
        # convert stream to http for mpg123 compatibility
        http_episode = re.sub('https', 'http', redirected_episode)
        # if audio service module is available use it
        if self.audioservice:
            self.audioservice.play(http_episode, message.data['utterance'])
        else:
            # othervice use normal mp3 playback
            self.process = play_mp3(http_episode)
        self.enclosure.mouth_text(episode_title)

    @intent_handler(IntentBuilder("LatestEpisodeIntent").require("LatestEpisodeKeyword"))
    def handle_latest_episode_intent(self, message):
        """Speak the latest episode of one podcast, or of all of them."""
        utter = message.data['utterance']
        self.enclosure.mouth_think()
        podcast_names = [self.settings["nameone"],
                         self.settings["nametwo"],
                         self.settings["namethree"]]
        podcast_urls = [self.settings["feedone"],
                        self.settings["feedtwo"],
                        self.settings["feedthree"]]
        # check if the user specified a podcast to check for a new podcast
        for index, name in enumerate(podcast_names):
            # skip if podcast slot left empty
            if not name:
                continue
            if name.lower() in utter.lower():
                parsed_feed = pp.parse(podcast_urls[index],
                                       urllib.urlopen(podcast_urls[index]))
                last_episode = (parsed_feed['episodes'][0]['title'])
                speech_string = ("The latest episode of " + name + " is " +
                                 last_episode)
                break
        else:
            # if no podcast names are provided, list all new episodes
            new_episodes = []
            for index, url in enumerate(podcast_urls):
                # skip if url slot left empty
                if not url:
                    continue
                parsed_feed = pp.parse(podcast_urls[index],
                                       urllib.urlopen(podcast_urls[index]))
                last_episode = (parsed_feed['episodes'][0]['title'])
                new_episodes.append(last_episode)
            # skip if i[0] slot left empty
            elements = [": ".join(i)
                        for i in zip(podcast_names, new_episodes) if i[0]]
            speech_string = "The latest episodes are the following: "
            speech_string += ", ".join(elements[:-2] +
                                       [" and ".join(elements[-2:])])
        self.speak(speech_string)

    def stop(self):
        """Stop service playback, or terminate the fallback mpg123 process."""
        if self.audioservice:
            self.audioservice.stop()
        else:
            if self.process and self.process.poll() is None:
                self.process.terminate()
                self.process.wait()
class WhiteNoiseSkill(MycroftSkill):
    """Plays ambient sound loops (white noise, waves, rain, wind) for a
    duration spoken in the utterance, then stops playback."""

    def __init__(self):
        super(WhiteNoiseSkill, self).__init__(name="WhiteNoiseSkill")
        sound_dir = dirname(__file__)
        self.play_list = {
            'waves': join(sound_dir, "waves.mp3"),
            'rain': join(sound_dir, "rain.mp3"),
            'wind': join(sound_dir, "wind.mp3"),
        }

    def initialize(self):
        self.audio_service = AudioService(self.bus)

    def _requested_seconds(self, utterance):
        """Return the spoken duration in whole seconds, or None if absent."""
        try:
            return int(extract_duration(utterance)[0].total_seconds())
        except AttributeError:
            # extract_duration found no duration in the utterance
            return None

    def handle_white_noise_intent(self, message):
        utterance = message.data.get('utterance', "")
        seconds = self._requested_seconds(utterance)
        if seconds is None:
            return None
        self.play_track(join(dirname(__file__), "whitenoise.mp3"),
                        seconds, utterance)

    @intent_file_handler("white.noice.intent")
    def handle_file_white_noice_intent(self, message):
        self.handle_white_noise_intent(message)

    @intent_file_handler("stop.white.noice.intent")
    def handle_stop_white_noice_intent(self, message):
        self.enclosure.mouth_reset()
        self.audio_service.stop()

    @intent_file_handler("waves.rain.wind.intent")
    def handle_file_rain_waves_wind_intent(self, message):
        self.handle_rain_waves_wind_intent(message)

    @intent_file_handler("stop.waves.rain.wind.intent")
    def handle_stop_rain_waves_wind_intent(self, message):
        self.enclosure.mouth_reset()
        self.audio_service.stop()

    def handle_rain_waves_wind_intent(self, message):
        utterance = message.data.get('utterance', "")
        # fuzzy-match the utterance against the known ambient tracks
        track, _confidence = match_one(utterance, self.play_list)
        seconds = self._requested_seconds(utterance)
        if seconds is None:
            return None
        self.play_track(track, seconds, utterance)

    def play_track(self, track_name, track_duration, utterance):
        """Play ``track_name`` for ``track_duration`` seconds, then stop."""
        if track_duration <= 0:
            return None
        self.log.info('track_duration is: ' + str(track_duration))
        self.log.info('track url is: ' + track_name)
        self.audio_service.play(track_name, utterance, True)
        remaining = track_duration
        while remaining > 0:
            time.sleep(1)
            remaining -= 1
        self.audio_service.stop()

    def stop(self):
        pass
    def initialize(self):
        """Set up the playback-control skill's audio service connection."""
        self.log.info('initializing Playback Control Skill')
        # NOTE(review): self.emitter is the legacy alias for the message bus
        # (newer mycroft-core uses self.bus) - confirm core version in use.
        self.audio_service = AudioService(self.emitter)
def initialize(self): self.audioservice = AudioService(self.bus)
class Deezer(CommonPlaySkill):
    """Common-play skill that searches and plays Deezer tracks/playlists.

    Playlist playback runs in a separate multiprocessing.Process
    (self.playing_thread); the currently playing index and elapsed seconds
    are shared with that process via multiprocessing.Value.
    """

    def __init__(self):
        super(Deezer, self).__init__()
        self.regexes = {}  # cache of regex strings loaded from *.regex files
        self.playlist_data = None  # data dict of the active playlist, if any
        self.playing_wait_thread = None
        self.playing_thread = None  # Process running playing_playlist()
        # shared between this process and the player process
        self.playlist_playing_index = Value('i', -1)
        self.playing_seconds = Value('i', -1)

    def initialize(self):
        super().initialize()
        self.audio_service = AudioService(self.bus)
        self.add_event('mycroft.audio.service.next', self.next_track)
        self.add_event('mycroft.audio.service.prev', self.prev_track)
        self.add_event('mycroft.audio.service.pause', self.pause)
        self.add_event('mycroft.audio.service.resume', self.resume)
        self.arl = self.settings.get('arl')  # Deezer auth cookie
        # TODO directory should probably default to self.file_system.path
        # This is a unique directory for each Skill.
        # There's also mycroft.util.get_cache_directory if you intend it to
        # be temporary
        self.music_dir = self.settings.get('music_dir', self.file_system.path)
        self.track_directory = os.path.join(self.music_dir, "track")

    def CPS_match_query_phrase(self, phrase):
        """Try a playlist match first; fall back to a single-track search.

        Single tracks are downloaded eagerly here, before the match is even
        selected, so the file is ready if CPS_start() is called.
        """
        self.log.info(f"Check Query Phrase: {phrase}")
        phrase, cps_match_level, data = self.specific_query(phrase=phrase)
        if cps_match_level is None:
            track = deezer_utils.search_first_track(track_name=phrase,
                                                    arl=self.arl)
            if track is None:
                return None
            else:
                track_id = track.get('id')
                self.speak_dialog(key="track_found",
                                  data={
                                      'title_short': track["title_short"],
                                      'artist': track['artist']['name']
                                  })
                download_path = deezer_utils.download_track(
                    track_id=track_id,
                    track_directory=self.track_directory,
                    arl=self.arl)
                # type 0 = single track, type 1 = playlist (see CPS_start)
                data = {
                    "type": 0,
                    "track": download_path,
                    "track_id": track_id
                }
                if 'deezer' in phrase:
                    cps_match_level = CPSMatchLevel.EXACT
                else:
                    cps_match_level = CPSMatchLevel.TITLE
        return phrase, cps_match_level, data

    """ This method responds wether the skill can play the input phrase.

        The method is invoked by the PlayBackControlSkill.

        Returns: tuple (matched phrase(str),
                        match level(CPSMatchLevel),
                        optional data(dict))
                 or None if no match was found.
    """

    def CPS_start(self, phrase, data):
        """Start playback of the matched track (type 0) or playlist (type 1).

        NOTE(review): self.playlist_playing_index.value is an int from
        Value('i', ...) and can never be None, so that check always runs.
        NOTE(review): joining the player Process here blocks this handler
        for the whole playlist duration - confirm that is intended.
        """
        if self.playing_thread is not None:
            self.playing_thread.kill()
            self.playing_thread = None
        if self.playlist_data is not None:
            self.playlist_data = None
        if self.playlist_playing_index.value is not None:
            self.playlist_playing_index.value = -1
        if data['type'] == 0:
            self.log.info("TrackType is Track")
            self.CPS_play(data['track'])
        elif data['type'] == 1:
            self.log.info("TrackType is Playlist")
            playlist = data['playlist']
            self.playlist_data = data
            playlist_search_results = data['playlist_search_results']
            track_directory = os.path.join(
                self.music_dir, str(playlist_search_results['id']))
            self.playing_thread = Process(target=self.playing_playlist,
                                          args=(playlist, track_directory, 0,
                                                -1))
            self.playing_thread.start()
            self.playing_thread.join()
            # clean up the downloaded playlist files once playback ends
            shutil.rmtree(track_directory, ignore_errors=True)

    """ Starts playback.

        Called by the playback control skill to start playback if the
        skill is selected (has the best match level)
    """

    def specific_query(self, phrase):
        """Match the phrase against the 'playlist' regex and, on a hit,
        page through the Deezer tracklist API collecting every track.

        Returns (phrase, match_level or None, data or None).
        """
        # Check if saved
        # match = re.match(self.translate_regex('saved_songs'), phrase)
        # if match and self.saved_tracks:
        #     return (1.0, {'data': None,
        #                   'type': 'saved_tracks'})

        # Check if playlist
        phrase = phrase.lower()
        match = re.match(self.translate_regex('playlist'), phrase)
        if match:
            playlist_search_results = deezer_utils.search_first_playlist(
                match.groupdict()['playlist'], self.arl)
            if playlist_search_results:
                tracklist = requests.get(
                    playlist_search_results['tracklist']).json()
                try:
                    data = tracklist["data"]
                    next_tracklist_url = tracklist['next']
                    try:
                        # follow 'next' links until a page has none (KeyError)
                        while True:
                            next_tracklist = requests.get(
                                next_tracklist_url).json()
                            data += next_tracklist['data']
                            next_tracklist_url = next_tracklist['next']
                            self.log.info(next_tracklist_url)
                    except KeyError as index:
                        pass
                except KeyError as dataError:
                    # NOTE(review): if the first page lacks "data", the
                    # return below references an unbound `data` - confirm.
                    pass
                return_data = {
                    'type': 1,
                    'playlist': data,
                    'playlist_search_results': playlist_search_results
                }
                return phrase, CPSMatchLevel.TITLE, return_data
            else:
                return phrase, CPSMatchLevel.GENERIC, None
        # Check album
        # match = re.match(self.translate_regex('album'), phrase)
        # if match:
        #     album = match.groupdict()['album']
        #     return self.query_album(album)
        #
        # # Check artist
        # match = re.match(self.translate_regex('artist'), phrase)
        # if match:
        #     artist = match.groupdict()['artist']
        #     return self.query_artist(artist)
        # match = re.match(self.translate_regex('song'), phrase)
        # if match:
        #     song = match.groupdict()['track']
        #     return self.query_song(song)
        return phrase, None, None

    def playing_playlist(self, playlist, track_directory, start_index, seek):
        """Player-process body: download and play each track in order.

        seek > -1 resumes playback at that offset instead of starting the
        track; NOTE(review): seek is never reset, so it appears to apply to
        every track of a resumed playlist, not just the first - confirm.
        """
        for i in range(start_index, len(playlist)):
            try:
                self.playlist_playing_index.value = i
                track_id = playlist[i]['id']
                downloaded_track = deezer_utils.download_track(
                    track_id=track_id,
                    track_directory=track_directory,
                    arl=self.arl)
                self.log.info(str(downloaded_track))
                if seek > -1:
                    self.audio_service.seek(seconds=seek)
                    self.audio_service.resume()
                else:
                    self.CPS_play(downloaded_track)
                self.log.info("Playing now ...")
                duration = playlist[i]['duration']
                # tick the shared elapsed-seconds counter for resume support
                for d in range(0, duration):
                    self.playing_seconds.value = d
                    time.sleep(1)
                shutil.rmtree(downloaded_track, ignore_errors=True)
            except Exception as e:
                print(e)
                self.log.error(e)

    def next_track(self):
        """Kill the player process and restart it at the next index, or
        announce the end of the playlist."""
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(
                self.music_dir, str(playlist_search_results['id']))
            if self.playlist_playing_index.value + 1 >= len(
                    self.playlist_data['playlist']):
                self.speak_dialog(
                    key='playlist.end',
                    data={
                        'title':
                        self.playlist_data['playlist_search_results']['title']
                    })
                self.playlist_data = None
                self.playlist_playing_index.value = -1
                shutil.rmtree(track_directory)
                return
            self.playing_thread = Process(
                target=self.playing_playlist,
                args=(self.playlist_data['playlist'], track_directory,
                      self.playlist_playing_index.value + 1, -1))
            self.playing_thread.start()
            self.playing_thread.join()

    def prev_track(self):
        """Kill the player process and restart it at the previous index
        (clamped to 0), or announce the end of the playlist."""
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(
                self.music_dir, str(playlist_search_results['id']))
            if self.playlist_playing_index.value + 1 >= len(
                    self.playlist_data['playlist']):
                self.speak_dialog(
                    key='playlist.end',
                    data={
                        'title':
                        self.playlist_data['playlist_search_results']['title']
                    })
                self.playlist_data = None
                self.playlist_playing_index.value = -1
                shutil.rmtree(track_directory)
                return
            index = self.playlist_playing_index.value - 1
            if index < 0:
                index = index + 1
            self.playing_thread = Process(target=self.playing_playlist,
                                          args=(self.playlist_data['playlist'],
                                                track_directory, index, -1))
            self.playing_thread.start()
            self.playing_thread.join()

    def pause(self):
        """Kill the player process (its index/seconds Values survive) and
        pause the audio backend."""
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
        self.audio_service.pause()

    def resume(self):
        """Restart the player process at the saved index/offset."""
        if self.playlist_data is not None:
            if self.playing_thread is not None:
                self.playing_thread.kill()
                self.playing_thread = None
            playlist_search_results = self.playlist_data[
                'playlist_search_results']
            track_directory = os.path.join(
                self.music_dir, str(playlist_search_results['id']))
            self.playing_thread = Process(
                target=self.playing_playlist,
                args=(self.playlist_data['playlist'], track_directory,
                      self.playlist_playing_index.value,
                      self.playing_seconds.value))
            self.playing_thread.start()
            self.playing_thread.join()
        pass

    def translate_regex(self, regex):
        """Load (and cache) the regex string from ``<regex>.regex``.

        NOTE(review): if the resource file is missing, the final lookup
        raises KeyError - confirm the resources always ship with the skill.
        """
        if regex not in self.regexes:
            path = self.find_resource(regex + '.regex')
            if path:
                with open(path) as f:
                    string = f.read().strip()
                self.regexes[regex] = string
        return self.regexes[regex]

    @intent_handler('user.intent')
    def speak_user_name(self, message):
        """Speak the Deezer account's user name."""
        self.log.info("Username Intent")
        self.speak_dialog(
            key='user',
            data={'user_name': deezer_utils.get_user_info(arl=self.arl)})
class OneBeforeLastSkill(MycroftSkill):
    """True/false quiz game: the player always answers the question asked
    one turn earlier ("one before last"), tracked via two indices.

    answer_index is the question currently being answered; question_index
    is the question currently being read out (always one ahead).
    """

    def __init__(self):
        super(OneBeforeLastSkill, self).__init__(name="OneBeforeLastSkill")
        self.answer_index = 0   # index of the question being answered
        self.question_index = 0  # index of the question being asked
        self.wrong = 0          # consecutive-game wrong answers (3 = fail)

    def initialize(self):
        # NOTE(review): self.emitter is the legacy alias for the message bus
        # (newer mycroft-core uses self.bus) - confirm core version.
        self.audio_service = AudioService(self.emitter)
        self.set_visual()

    @intent_handler(IntentBuilder("StartupIntent").require("Startup"))
    @adds_context("IsPlaying")
    def startup_intent(self, message):
        """Load and shuffle the question bank, then ask the first two
        questions (the player answers question 1 after hearing question 2)."""
        self.speak_dialog("welcome")
        fp = open(os.path.join(os.path.split(__file__)[0], "tfqdb.json"))
        self.questions = json.load(fp)
        fp.close()
        random.shuffle(self.questions)
        # each round uses four questions from the bank
        self.questions = self.questions[:4]
        self.speak_dialog("first.question.intro")
        self.speak_dialog("question",
                          data={"question": self.questions[0]["question"],
                                "index": 1})
        self.speak_dialog("second.question.intro")
        self.speak_dialog("question",
                          data={"question": self.questions[1]["question"],
                                "index": 2})
        self.speak_dialog("second.question.ask", expect_response=True)
        self.answer_index = 0
        self.question_index = 1

    @intent_handler(IntentBuilder("RepeatIntent").require("Repeat"))
    @adds_context("IsPlaying")
    def repeat_intent(self, message):
        """Re-read the question currently being asked."""
        self.speak_dialog("question",
                          data={"question":
                                self.questions[self.question_index]["question"],
                                "index": self.question_index+1},
                          expect_response=True)

    @intent_handler(IntentBuilder("AnsweredTrueIntent").require("True").require("IsPlaying"))
    def answer_true_intent(self, message):
        self.handle_answer(expected=True)

    @intent_handler(IntentBuilder("AnsweredFalseIntent").require("False").require("IsPlaying"))
    def answer_false_intent(self, message):
        self.handle_answer(expected=False)

    def handle_answer(self, expected):
        """Score the answer to the one-before-last question and advance,
        restart from question 1 on a wrong answer, or end the game."""
        if self.questions[self.answer_index]["answer"] == expected:
            # correct!
            self.play_sound("correct.mp3")
            self.speak_dialog("correct")
            self.set_visual()
            self.question_index += 1
            self.answer_index += 1
            if self.question_index == len(self.questions):
                # all questions read; one answer still outstanding
                self.speak_dialog("final.answer", expect_response=True)
            elif self.question_index > len(self.questions):
                # last outstanding answer was correct - game won
                self.play_sound("win.mp3")
                self.speak_dialog("complete")
            else:
                self.speak_dialog("question",
                                  data={"question":
                                        self.questions[self.question_index]["question"],
                                        "index": self.question_index+1},
                                  expect_response=True)
        else:
            # wrong!
            self.wrong += 1
            self.set_visual()
            if self.wrong == 3:
                # third strike ends the game
                self.fail()
                return
            self.play_sound("wrong.mp3")
            self.speak_dialog("wrong", data={"count": self.wrong})
            # restart the round from the first two questions
            self.question_index = 1
            self.answer_index = 0
            self.speak_dialog("question",
                              data={"question": self.questions[0]["question"],
                                    "index": 1})
            self.speak_dialog("question",
                              data={"question": self.questions[1]["question"],
                                    "index": 2})
            self.speak_dialog("second.question.ask", expect_response=True)

    def fail(self):
        """Game over after three wrong answers."""
        self.play_sound("failure.mp3")
        self.speak_dialog("fail")

    def play_sound(self, sound):
        """Play an effect mp3 bundled in the skill's sounds/ directory."""
        self.audio_service.play(os.path.join(os.path.split(__file__)[0],
                                             "sounds", sound))

    def stop(self):
        # NOTE(review): stop_requested is not read anywhere in this class
        # and is not initialized in __init__ - confirm it is used elsewhere.
        self.stop_requested = True
        return False

    def set_visual(self):
        """Render the score grid (one '#' per correct answer, one CROSS per
        wrong answer) on the Mark 1 mouth display."""
        g = GRID[:]
        for i in range(self.answer_index):
            g = MycroftDisplay.utils.insert_grid(g, "#", 4+(i*2), 1)
        for i in range(self.wrong):
            g = MycroftDisplay.utils.insert_grid(g, CROSS, 4+(i*8), 3)
        for img_code, x, y in MycroftDisplay.Mark1.from_large_grid(g):
            self.enclosure.mouth_display(img_code=img_code, x=x, y=y)
class Emby(CommonPlaySkill):
    """Common-play skill that searches an Emby media server and streams the
    matching songs through the Mycroft AudioService."""

    def __init__(self):
        super().__init__()
        self._setup = False
        self.audio_service = None
        self.emby_croft = None
        # Stable client id derived from the Mycroft device uuid; identifies
        # this device to the Emby server.
        self.device_id = hashlib.md5(
            ('Emby'+DeviceApi().identity.uuid).encode()).hexdigest()

    def initialize(self):
        # Nothing to do at load time; the Emby connection is established
        # lazily on first use via connect_to_emby().
        pass

    @intent_file_handler('emby.intent')
    def handle_emby(self, message):
        """Handle an explicit "play ... from emby"-style utterance."""
        self.log.log(20, message.data)
        # first thing is connect to emby or bail
        if not self.connect_to_emby():
            self.speak_dialog('configuration_fail')
            return

        # determine intent
        intent, intent_type = EmbyCroft.determine_intent(message.data)

        songs = []
        try:
            songs = self.emby_croft.handle_intent(intent, intent_type)
        except Exception as e:
            # Best-effort: log and fall through to the "no songs" path.
            self.log.log(20, e)
            self.speak_dialog('play_fail', {"media": intent})

        if not songs or len(songs) < 1:
            self.log.log(20, 'No songs Returned')
            self.speak_dialog('play_fail', {"media": intent})
        else:
            # setup audio service and play
            self.audio_service = AudioService(self.bus)
            self.speak_playing(intent)
            self.audio_service.play(songs, message.data['utterance'])

    def speak_playing(self, media):
        """Announce the media that is about to play."""
        data = dict()
        data['media'] = media
        self.speak_dialog('emby', data)

    @intent_file_handler('diagnostic.intent')
    def handle_diagnostic(self, message):
        """Two-stage connectivity check: unauthenticated public server info
        first, then an authenticated connection."""
        self.log.log(20, message.data)
        self.speak_dialog('diag_start')

        # connect to emby for diagnostics (no auth attempt)
        self.connect_to_emby(diagnostic=True)
        connection_success, info = self.emby_croft.diag_public_server_info()

        if connection_success:
            self.speak_dialog('diag_public_info_success', info)
        else:
            # Server unreachable — no point attempting auth.
            self.speak_dialog('diag_public_info_fail',
                              {'host': self.settings['hostname']})
            self.speak_dialog('general_check_settings_logs')
            self.speak_dialog('diag_stop')
            return

        if not self.connect_to_emby():
            self.speak_dialog('diag_auth_fail')
            self.speak_dialog('diag_stop')
            return
        else:
            self.speak_dialog('diag_auth_success')

        self.speak_dialog('diagnostic')

    def stop(self):
        pass

    def CPS_start(self, phrase, data):
        """ Starts playback.

        Called by the playback control skill to start playback if the
        skill is selected (has the best match level)
        """
        # setup audio service
        self.audio_service = AudioService(self.bus)
        self.audio_service.play(data[phrase])

    def CPS_match_query_phrase(self, phrase):
        """ This method responds whether the skill can play the input phrase.

        The method is invoked by the PlayBackControlSkill.

        Returns: tuple (matched phrase(str),
                        match level(CPSMatchLevel),
                        optional data(dict))
                 or None if no match was found.
        """
        # first thing is connect to emby or bail
        if not self.connect_to_emby():
            return None

        self.log.log(20, phrase)
        match_type, songs = self.emby_croft.parse_common_phrase(phrase)

        if match_type and songs:
            match_level = None
            if match_type is not None:
                self.log.log(20, 'Found match of type: ' + match_type)
                # Songs and albums match on title; artists on artist level.
                if match_type == 'song' or match_type == 'album':
                    match_level = CPSMatchLevel.TITLE
                elif match_type == 'artist':
                    match_level = CPSMatchLevel.ARTIST

                self.log.log(20, 'match level' + str(match_level))
                # Callback data is keyed by the phrase; CPS_start reads it
                # back via data[phrase].
                song_data = dict()
                song_data[phrase] = songs
                self.log.log(20, "First 3 item urls returned")
                max_songs_to_log = 3
                songs_logged = 0
                for song in songs:
                    self.log.log(20, song)
                    songs_logged = songs_logged + 1
                    if songs_logged >= max_songs_to_log:
                        break
            return phrase, match_level, song_data
        else:
            return None

    def connect_to_emby(self, diagnostic=False):
        """
        Attempts to connect to the server based on the config

        if diagnostic is False an attempt to auth is also made
        returns true/false on success/failure respectively
        :return:
        """
        auth_success = False
        try:
            self.emby_croft = EmbyCroft(
                self.settings["hostname"] + ":" + str(self.settings["port"]),
                self.settings["username"], self.settings["password"],
                self.device_id, diagnostic)
            auth_success = True
        except Exception as e:
            self.log.log(
                20, "failed to connect to emby, error: {0}".format(str(e)))
        return auth_success
class NatureSoundSkill(MycroftSkill):
    """Plays bundled ambient nature-sound mp3s (river, ocean, storm, ...)
    on request, via the AudioService when available or a direct mpg123
    subprocess otherwise."""

    def __init__(self):
        super(NatureSoundSkill, self).__init__(name="NatureSoundSkill")
        self.audioservice = None
        # Fallback playback subprocess used when no AudioService exists.
        # BUGFIX: previously never initialized, so initialize()/stop() could
        # raise AttributeError before anything had been played.
        self.process = None

    def getPath(self, name):
        """Return the absolute path of the bundled mp3 called *name*."""
        return join(dirname(__file__), "mp3", name)

    def initialize(self):
        """Load skill data and register one "play <sound>" intent per
        supported environment sound."""
        self.load_data_files(dirname(__file__))
        self.audioservice = None
        if AudioService:
            self.audioservice = AudioService(self.emitter)
            # Silence anything left over from a previous session.
            self.audioservice.stop()
        # BUGFIX: the original else-branch called self.process.terminate()
        # here, but self.process is never set before playback, so it raised
        # AttributeError whenever AudioService was unavailable. Nothing can
        # be playing during initialize(), so no cleanup is needed.

        # (intent name, sound keyword, handler) for each supported sound.
        intents = [
            ("RiverIntent", "RainyRiverKeyword", self.handle_river_intent),
            ("DawnIntent", "DawnKeyword", self.handle_dawn_intent),
            ("ThunderstormIntent", "ThunderstormKeyword",
             self.handle_thunderstorm_intent),
            ("TropicalStormIntent", "TropicalStormKeyword",
             self.handle_tropical_storm_intent),
            ("OceanIntent", "OceanKeyword", self.handle_ocean_intent),
            ("RainforestIntent", "RainforestKeyword",
             self.handle_rainforest_intent),
        ]
        for intent_name, keyword, handler in intents:
            intent = IntentBuilder(intent_name).require(
                "PlayKeyword").require(keyword).build()
            self.register_intent(intent, handler)

    # TODO: Loop mp3s
    def _play(self, filename, environment, message):
        """Shared handler body: play *filename* and announce *environment*
        through the "info" dialog."""
        path = self.getPath(filename)
        if self.audioservice:
            self.audioservice.play(path, message.data['utterance'])
        else:
            # Fallback playback; keep the handle so stop() can kill it.
            self.process = play_mp3(path)
        self.speak_dialog("info", {"environment": environment})

    def handle_river_intent(self, message):
        self._play("rainy-river.mp3", "Rainy river", message)

    def handle_dawn_intent(self, message):
        self._play("dawn-chorus.mp3", "Dawn chorus", message)

    def handle_thunderstorm_intent(self, message):
        self._play("urban-thunderstorm.mp3", "Thunderstorm", message)

    def handle_tropical_storm_intent(self, message):
        self._play("tropical-storm.mp3", "Tropical Storm", message)

    def handle_rainforest_intent(self, message):
        self._play("rainforest.mp3", "Rainforest", message)

    def handle_ocean_intent(self, message):
        self._play("ocean-waves.mp3", "Ocean waves", message)

    def stop(self):
        """Stop playback on whichever backend is active."""
        if self.audioservice:
            self.audioservice.stop()
        elif self.process:
            # BUGFIX: guard so stop() before first playback is a no-op
            # instead of an AttributeError.
            self.process.terminate()
            self.process.wait()
class TemplateSkill(MycroftSkill):
    """Searches mp3.zing.vn for a song named by the user, downloads the
    128kbps stream next to this file and plays it via the AudioService."""

    def __init__(self):
        super(TemplateSkill, self).__init__(name="TemplateSkill")
        # Initialize working variables used within the skill.
        self.count = 0
        self.process = None
        self.play_list = {0: join(dirname(__file__), "Seasons.mp3")}

    def initialize(self):
        self.audioservice = AudioService(self.bus)
        launch_intent = IntentBuilder("music zing").require("Music").require(
            "zing").build()
        self.register_intent(launch_intent, self.handle_play_zing_mp3)

    def handle_play_zing_mp3(self, message):
        """Prompt the user for a song name; the reply lands in converse()."""
        self.speak('Here am I,Which song you want to play',
                   expect_response=True)

    def converse(self, utterances, lang="en-us"):
        """Treat the user's reply as a song query: resolve it through the
        zing.vn suggestion API, scrape the song page for the media key,
        download the mp3 and hand it to the AudioService.

        NOTE(review): this never returns True, so the utterance also falls
        through to normal intent handling — confirm that is intended.
        """
        print("Seaching Song:" + utterances[0] + "...")
        self.speak('Play:' + utterances[0])
        key_word = utterances[0]
        resp = requests.get(
            'http://ac.mp3.zing.vn/complete/desktop?type=song&query=' +
            urllib.parse.quote(key_word))
        # CLEANUP: resp.json() already returns the parsed object; the
        # original pointlessly round-tripped it through dumps/loads.
        obj = resp.json()
        songID = obj["data"][1]['song'][0]['id']
        songUrl = "https://mp3.zing.vn/bai-hat/" + songID + ".html"
        print(songUrl)
        resp = requests.get(songUrl)
        # Raw string avoids the invalid "\/" / "\?" escape warnings; the
        # resulting pattern is byte-identical to the original.
        key = re.findall(
            r'data-xml="\/media\/get-source\?type=audio&key=([a-zA-Z0-9]{20,35})',
            resp.text)
        print(key[0])
        songApiUrl = ("https://mp3.zing.vn/xhr/media/get-source"
                      "?type=audio&key=" + key[0])
        resp = requests.get(songApiUrl)
        obj = resp.json()
        mp3Source = "https:" + obj["data"]["source"]["128"]
        # The source URL redirects to the real CDN location; capture it
        # without following so we can stream it ourselves.
        realURLdata = requests.get(mp3Source, allow_redirects=False)
        realURL = realURLdata.headers['Location']
        resp = requests.get(realURL, stream=True)
        file_path = join(dirname(__file__), "song.mp3")
        with open(file_path, 'wb') as fh:
            for chunk in resp.iter_content(chunk_size=1024):
                fh.write(chunk)
        try:
            self.audioservice.play(file_path)
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    def stop(self):
        """Terminate the fallback playback process if it is still running."""
        if self.process and self.process.poll() is None:
            print("ngung hat")
            self.process.terminate()
            self.process.wait()
def initialize(self):
    """Attach the audio service and register the "I have a question"
    intent with its handler."""
    self.audio_service = AudioService(self.bus)
    question_intent = (IntentBuilder("IHaveAQuestion")
                       .require("IHaveAQuestion")
                       .build())
    self.register_intent(question_intent,
                         self.handle_i_have_a_question_intent)
def initialize(self):
    """Create the AudioService handle and register the zing-music
    launch intent."""
    self.audioservice = AudioService(self.bus)
    zing_intent = IntentBuilder("music zing") \
        .require("Music") \
        .require("zing") \
        .build()
    self.register_intent(zing_intent, self.handle_play_zing_mp3)
class GPBNewsSkill(MycroftSkill):
    """ Plays the latest news from Georgia Public Broadcasting

        Shamelessly stolen from the NPR News Skill with a few tweaks.
        Note that the latest mp3 may not be news, but could be an
        interview, etc. If you know a better source for the latest news
        mp3, let me know.
    """

    def __init__(self):
        super(GPBNewsSkill, self).__init__(name="GPBNewsSkill")
        # This could change at any time and ruin the skill
        self.url_rss = \
            "http://feeds.feedburner.com/gpbnews/GeorgiaRSS?format=xml"
        # Fallback playback process when the AudioService is unavailable.
        self.process = None
        self.audioservice = None

    def initialize(self):
        intent = IntentBuilder("GPBNewsIntent").require(
            "GPBNewsKeyword").build()
        self.register_intent(intent, self.handle_intent)

        intent = IntentBuilder("GPBNewsStopIntent") \
            .require("GPBNewsStopVerb") \
            .require("GPBNewsKeyword").build()
        self.register_intent(intent, self.handle_stop)

        if AudioService:
            # NOTE(review): self.emitter is the legacy bus attribute; newer
            # cores expose self.bus — confirm against the target core.
            self.audioservice = AudioService(self.emitter)

    def handle_intent(self, message):
        """Fetch the newest feed entry, scrape its page for the first mp3
        link and play it."""
        try:
            self.stop()
            self.speak_dialog("gpb.news")
            # Pause for the intro, then start the new stream
            time.sleep(4)
            feed = feedparser.parse(self.url_rss)
            next_link = feed["entries"][0]["links"][0]["href"]
            html = requests.get(next_link)
            # Find the first mp3 link.
            # BUGFIX: search the decoded text, not response.content — in
            # Python 3 a str pattern cannot be applied to bytes and raised
            # TypeError. The pattern is also a raw string now to avoid the
            # invalid "\." escape warning.
            mp3_find = re.search(r'href="(?P<mp3>.+\.mp3)"', html.text)
            # Replace https with http because AudioService can't handle it
            mp3_link = mp3_find.group("mp3").replace("https", "http")
            # if audio service module is available use it
            if self.audioservice:
                self.audioservice.play(mp3_link, message.data['utterance'])
            else:
                # otherwise use normal mp3 playback
                self.process = play_mp3(mp3_link)
        except Exception as e:
            # Best-effort error path: tell the user and log the failure.
            self.speak_dialog("gpb.news.stop")
            LOGGER.error("Error: {0}".format(e))

    def handle_stop(self, message):
        self.stop()
        self.speak_dialog('gpb.news.stop')

    def stop(self):
        """Stop playback on whichever backend is active."""
        if self.audioservice:
            self.audioservice.stop()
        else:
            if self.process and self.process.poll() is None:
                self.process.terminate()
                self.process.wait()
class CommonPlaySkill(MycroftSkill, ABC):
    """Base class for skills answering common "Play X" requests.

    Subclasses implement CPS_match_query_phrase() to score a phrase and
    CPS_start() to begin playback if this skill wins the selection run by
    the playback-control skill over 'play:query' / 'play:start' messages.
    """

    def __init__(self, name=None, bus=None):
        super().__init__(name, bus)
        self.audioservice = None
        # Original "play ..." phrase, saved so CPS_play() can forward it to
        # the audio backend (may contain modifiers like "on the chromecast").
        self.play_service_string = None

    def bind(self, bus):
        # Extends MycroftSkill.bind() to attach the AudioService and the
        # common-play message handlers. Called automatically during setup.
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a 'play:query' with this skill's match and confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(message.response({"phrase": search_phrase,
                                        "skill_id": self.skill_id,
                                        "searching": True}))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            match = result[0]
            level = result[1]
            # Optional third element carries callback data for CPS_start().
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "callback_data": callback,
                                            "conf": confidence}))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "searching": False}))

    def __calc_confidence(self, match, phrase, level):
        """Map a CPSMatchLevel plus word coverage to a 0.0-1.0 confidence."""
        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0

        # Each level owns a 0.1-wide confidence band, nudged up by coverage.
        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + (consumed_pct / 10)
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + (consumed_pct / 10)
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + (consumed_pct / 10)
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + (consumed_pct / 10)
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + (consumed_pct / 10)
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Begin playback when this skill is the selected handler."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """ Begin playback of a media file or stream

        Normally this method will be invoked with somthing like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        # Report True only when something was actually playing and stopped.
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """ Analyze phrase to see if it is a play-able phrase with this
        skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """ Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g. "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        #   self.CPS_play("http://zoosh.com/stream_music")
        pass