class SingingSkill(MycroftSkill):
    """Sings one of a set of bundled songs through the audio service."""

    def __init__(self):
        super(SingingSkill, self).__init__(name="SingingSkill")
        self.process = None
        # Set in initialize(); default to None so stop() is safe before then.
        self.audioservice = None
        # Bundled mp3 files shipped alongside the skill.
        self.play_list = {
            0: join(dirname(__file__), "popey-favourite.mp3"),
            1: join(dirname(__file__), "popey-jackson.mp3"),
            2: join(dirname(__file__), "popey-jerusalem.mp3"),
            3: join(dirname(__file__), "popey-lose-yourself.mp3"),
            4: join(dirname(__file__), "popey-lovemetender.mp3"),
            5: join(dirname(__file__), "popey-rocketman.mp3"),
        }

    def initialize(self):
        self.audioservice = AudioService(self.bus)
        self.add_event("mycroft.sing", self.sing, False)

    def sing(self, message):
        """Bus-event handler: play a fixed track."""
        self.audioservice.play(self.play_list[3])

    @intent_handler(IntentBuilder('').require('Sing'))
    def handle_sing(self, message):
        """Announce, then play a randomly chosen song."""
        # BUG FIX: random.choice() expects a sequence; picking from the
        # dict's values is explicit instead of relying on the keys
        # happening to be a contiguous 0..n-1 range.
        path = random.choice(list(self.play_list.values()))
        try:
            self.speak_dialog('singing')
            wait_while_speaking()
            self.audioservice.play(path)
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    def stop(self):
        # BUG FIX: playback goes through the audio service, but the old
        # stop() only checked self.process (never assigned anywhere), so
        # the song could never be stopped.  Stop the audio service too.
        if self.audioservice:
            self.audioservice.stop()
        if self.process and self.process.poll() is None:
            self.speak_dialog('singing.stop')
            self.process.terminate()
            self.process.wait()
class DaysUntilChristmasSkill(MycroftSkill):
    """Tells the user how many days remain until Christmas, over a carol."""

    def __init__(self):
        super(DaysUntilChristmasSkill, self).__init__(name="DaysUntilChristmasSkill")
        self.audio_service = None

    def initialize(self):
        self.load_data_files(dirname(__file__))
        self.audio_service = AudioService(self.emitter)
        self.register_intent_file('days.until.christmas.intent', self.handle_christmas)

    def handle_christmas(self, message):
        """Compute the day count and announce it."""
        today = datetime.date.today()
        christmas_day = datetime.date(today.year, 12, 25)
        # If this year's Christmas has already passed, count toward next
        # year's instead, so the difference below is never negative.
        if christmas_day < today:
            christmas_day = christmas_day.replace(year=today.year + 1)
        days_until_christmas = christmas_day - today
        # BUG FIX: the original passed the malformed URI
        # 'file//./music/...' (missing ':' and resolved against the
        # process working directory).  Resolve the bundled file relative
        # to this skill's own directory instead.
        self.audio_service.play(dirname(__file__) + '/music/carol_of_bells.mp3')
        self.speak("there are " + str(days_until_christmas.days) +
                   " days until christmas")

    def stop(self):
        pass
class YoutubeAudioSkill(MycroftSkill):
    """Search YouTube for a phrase, download the top result's audio with
    youtube-dl and play it through the Mycroft audio service."""

    # The constructor of the skill, which calls Mycroft Skill's constructor
    def __init__(self):
        super(YoutubeAudioSkill, self).__init__(name="YoutubeAudioSkill")
        # Last search string and resolved video URL; initialised empty.
        self.search = ""
        self.url = ""

    def getResults(self, search, pos=0):
        """Return the URL of the `pos`-th YouTube search result for `search`.

        Scrapes the public results page, so this depends on YouTube's HTML
        layout ('yt-uix-tile-link' anchors) and may break without notice.
        Raises IndexError if fewer than pos+1 results are found.
        """
        # Use urllib to perform the query against the results page.
        query = urllib.parse.quote(search)
        link = "https://www.youtube.com/results?search_query=" + query
        response = urllib.request.urlopen(link)
        html = response.read()
        soup = BeautifulSoup(html, 'html.parser')
        vids = soup.findAll(attrs={'class': 'yt-uix-tile-link'})
        ytURL = 'https://www.youtube.com' + vids[pos]['href']
        return ytURL

    def initialize(self):
        # The intent requires the trigger keyword plus a free-form query.
        # The query is not taken from a .voc file - it is captured by the
        # regex pattern stored in regex/search_query.rx.
        play_video_audio_intent = IntentBuilder(
            "PlayYoutubeAudioIntent").require("play_youtube_audio").require(
                "search_query").build()
        self.register_intent(play_video_audio_intent,
                             self.play_video_audio_intent)
        # The downloaded file is played via the audio service, so
        # initialise it here.
        self.audio_service = AudioService(self.bus)

    def play_video_audio_intent(self, message):
        """Intent handler: resolve the query to a URL, download the audio
        as wav, then play it."""
        # The full utterance is available via message.data.get('utterance');
        # the captured regex group via message.data.get("search_query").
        #self.speak_dialog(message.data.get("search_query"))
        ytURL = self.getResults(message.data.get("search_query"))
        # Downloading takes a moment, so warn the user first.
        self.speak_dialog("downloading")
        # Run youtube-dl through Sultan.
        sultan = Sultan()
        # First remove any existing output file.
        rm("/tmp/output.wav")
        #ytURL = "https://www.youtube.com/watch?v=IPXIgEAGe4U"
        # Double underscores in the Sultan method name map to a hyphen in
        # the actual command (youtube-dl).
        sultan.youtube__dl("-x --audio-format wav -o '/tmp/output.%(ext)s' " + ytURL).run()
        # Then hand the resulting file to the audio service.
        self.audio_service.play('file:///tmp/output.wav')

    # Nothing to clean up when the user says "stop": the skill keeps no
    # playback handle of its own.
    def stop(self):
        pass
class NewsSkill(MycroftSkill):
    """Plays the latest news episode from a configured RSS feed."""

    def __init__(self):
        super(NewsSkill, self).__init__(name="NewsSkill")
        # BUG FIX: settings.get("pre_select") returns None when the
        # setting is missing, which made the "not_set" membership test in
        # initialize() raise TypeError.  Default to "" instead.
        self.pre_select = self.settings.get("pre_select", "")
        self.url_rss = self.settings.get("url_rss")
        self.process = None
        self.audioservice = None

    def initialize(self):
        self.pre_select = self.settings.get("pre_select", "")
        if "not_set" in self.pre_select:
            # Placeholder preset selected -> use the custom RSS URL.
            self.url_rss = self.settings.get("url_rss")
        else:
            self.url_rss = self.pre_select

        intent = IntentBuilder("NewsIntent").require("NewsKeyword").build()
        self.register_intent(intent, self.handle_intent)

        intent = IntentBuilder("NewsStopIntent") \
            .require("NewsStopVerb") \
            .require("NewsKeyword").build()
        self.register_intent(intent, self.handle_stop)

        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_intent(self, message):
        """Fetch the feed and start streaming its newest entry."""
        try:
            data = feedparser.parse(self.url_rss)
            # If news is already playing, stop it silently.
            self.stop()
            self.speak_dialog('news')
            # Pause for the intro, then start the new stream.
            time.sleep(4)
            url = re.sub('https', 'http',
                         data['entries'][0]['links'][0]['href'])
            # If the audio service module is available, use it ...
            if self.audioservice:
                self.audioservice.play(url, message.data['utterance'])
            else:
                # ... otherwise fall back to normal mp3 playback.
                self.process = play_mp3(url)
        except Exception as e:
            LOGGER.error("Error: {0}".format(e))

    def handle_stop(self, message):
        self.stop()
        self.speak_dialog('news.stop')

    def stop(self):
        if self.audioservice:
            self.audioservice.stop()
        else:
            if self.process and self.process.poll() is None:
                self.process.terminate()
                self.process.wait()
class NewsSkill(MycroftSkill):
    """Streams the latest news episode from a configurable RSS feed."""

    def __init__(self):
        super(NewsSkill, self).__init__(name="NewsSkill")
        self.process = None
        self.audioservice = None

    def initialize(self):
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    @property
    def url_rss(self):
        """Resolve the feed URL from settings, falling back to config."""
        pre_select = self.settings.get("pre_select", "")
        if "not_set" in pre_select:
            # Placeholder preset selected -> use the custom RSS URL.
            url_rss = self.settings.get("url_rss")
        else:
            # Use the selected preset's URL.
            url_rss = pre_select
        if not url_rss and 'url_rss' in self.config:
            url_rss = self.config['url_rss']
        return url_rss

    @intent_handler(IntentBuilder("").require("Play").require("News"))
    def handle_intent(self, message):
        """Announce, then stream the newest entry of the feed."""
        try:
            feed = feedparser.parse(self.url_rss)
            # Silently stop anything already playing before the intro.
            self.stop()
            self.speak_dialog('news')
            wait_while_speaking()
            # After the intro, start the news stream (plain http).
            stream = re.sub('https', 'http',
                            feed['entries'][0]['links'][0]['href'])
            if self.audioservice:
                # Preferred path: hand the stream to the audio service.
                self.audioservice.play(stream, message.data['utterance'])
            else:
                # Fallback: spawn a local mp3 player process.
                self.process = play_mp3(stream)
        except Exception as e:
            LOG.error("Error: {0}".format(e))

    def stop(self):
        if self.audioservice:
            self.audioservice.stop()
        elif self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
class IceBarrage(MycroftSkill):
    """Casts Ice Barrage: speaks a line, then plays the spell sound."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        # Audio service must be created after the skill is bound to the bus.
        self.audio_service = AudioService(self.bus)

    @intent_file_handler('barrage.ice.intent')
    def handle_barrage_ice(self, message):
        """Speak the incantation and trigger the sound effect."""
        self.speak_dialog('barrage.ice')
        self.audio_service.play('file:///home/pi/ice_barrage.wav')
class ControlFurby(MycroftSkill):
    """Plays an 'autumn adventure' story after a short spoken intro."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        self.audio_service = AudioService(self.bus)

    @intent_file_handler('autumn.adventure.intent')
    def autumn_adventure(self, message):
        """Introduce and play the story track."""
        self.speak_dialog('autumn.adventure')
        # NOTE(review): this path is relative to the mycroft working
        # directory; consider resolving it against __file__ instead.
        story = "skills/winston-more-cowbell.cdoebler1/music/autumn_adventure.mp3"
        self.audio_service.play(story)

    def stop(self):
        pass
class NewsSkill(MycroftSkill):
    """Plays an hourly news podcast from NPR, Fox or CBC."""

    def __init__(self):
        super(NewsSkill, self).__init__(name="NewsSkill")
        # Feed URLs for the supported sources.
        self.npr = "http://www.npr.org/rss/podcast.php?id=500005"
        self.fox = "http://feeds.foxnewsradio.com/FoxNewsRadio"
        self.cbc = "http://www.cbc.ca/podcasting/includes/hourlynews.xml"
        self.audio = None
        self.playing = False
        # TODO get from config
        self.default = "random"

    def initialize(self):
        intent = (IntentBuilder("NewsIntent")
                  .require("NewsKeyword")
                  .optionally("NewsSource")
                  .build())
        self.register_intent(intent, self.handle_intent)
        self.audio = AudioService(self.emitter)

    def handle_intent(self, message):
        """Pick the requested (or default/random) source and stream it."""
        self.playing = True
        # Remember an explicitly requested source as the new default.
        requested = message.data.get("NewsSource")
        if requested:
            self.default = requested
        if self.default == "fox":
            url_rss = self.fox
        elif self.default == "npr":
            url_rss = self.npr
        elif self.default == "cbc":
            url_rss = self.cbc
        else:
            url_rss = random.choice([self.fox, self.cbc, self.npr])
        try:
            data = feedparser.parse(url_rss)
            self.speak_dialog('news', {"source": self.default})
            time.sleep(3)  # let the announcement finish first
            self.audio.play(
                re.sub('https', 'http',
                       data['entries'][0]['links'][0]['href']))
        except Exception as e:
            LOGGER.error("Error: {0}".format(e))
            # Reset the flag so stop() stays quiet after a failed start.
            self.playing = False

    def stop(self):
        if self.playing:
            self.speak_dialog("news.stop")
            self.playing = False
class TranslateSkill(MycroftSkill):
    """Translates a phrase and speaks it using Google Translate TTS."""

    def __init__(self):
        super(TranslateSkill, self).__init__('speech_client')

    def initialize(self):
        intent = IntentBuilder('TranslateIntent') \
            .require('TranslateKeyword') \
            .require('LanguageKeyword') \
            .require('phrase') \
            .build()
        self.register_intent(intent, self.handle_translate)

        intent = IntentBuilder('TranslateToIntent') \
            .require('TranslateKeyword') \
            .require('translate') \
            .require('ToKeyword') \
            .require('LanguageKeyword') \
            .build()
        self.register_intent(intent, self.handle_translate_to)

        self.audio = AudioService(self.emitter)

    def handle_translate(self, message):
        """Handle "translate <phrase> <language>" style requests."""
        lang = message.data.get("LanguageKeyword")
        sentence = message.data.get("phrase")
        translated = translate(sentence, lang)
        self.say(translated, lang)

    def handle_translate_to(self, message):
        """Handle "translate <text> to <language>" style requests."""
        lang = message.data.get("LanguageKeyword")
        sentence = message.data.get("translate")
        translated = translate(sentence, lang)
        self.say(translated, lang)

    def say(self, sentence, lang):
        """Fetch Google Translate TTS audio for `sentence` and play it."""
        # Strip accents so the text survives URL/shell transport.
        sentence = unicodedata.normalize('NFKD', sentence).encode('ascii',
                                                                  'ignore')
        # BUG FIX: loggers use %-style lazy formatting; the original passed
        # the phrase as an unused extra argument, so it was never logged.
        self.log.info("TRANSLATED PHRASE: %s", sentence)
        # BUG FIX: percent-encode the text.  The original interpolated the
        # raw sentence (a bytes object on Python 3, which would raise
        # TypeError on concatenation, and whose spaces would break the URL).
        from urllib.parse import quote
        query = quote(sentence)
        get_sentence = ('wget -q -U Mozilla -O /tmp/translated.mp3 '
                        '"https://translate.google.com/translate_tts?tl=' +
                        lang + '&q=' + query + '&client=tw-ob' + '"')
        # NOTE(review): os.system with interpolated input remains shell
        # injection-prone; prefer subprocess.run with an argument list.
        os.system(get_sentence)
        self.audio.play("/tmp/translated.mp3")
class AngryBeanieSkill(MycroftSkill):
    """Lists and plays episodes of Angry Beanie podcasts."""

    def __init__(self):
        super(AngryBeanieSkill, self).__init__(name="AngryBeanieSkill")
        self.audioservice = None
        # BUG FIX: self.process was referenced in stop() but never
        # initialized, raising AttributeError when stop came before play.
        self.process = None

    def initialize(self):
        get_podcasts_intent = IntentBuilder("GetPodcastsIntent").require(
            "GetPodcastsKeyword").build()
        self.register_intent(get_podcasts_intent,
                             self.handle_get_podcasts_intent)

        get_episodes_intent = IntentBuilder("GetEpisodesIntent").require(
            "GetEpisodesKeyword").require("ShowName").build()
        self.register_intent(get_episodes_intent,
                             self.handle_get_episodes_intent)

        get_latest_episode_intent = IntentBuilder(
            "GetLatestEpisodeIntent").require(
            "GetLatestEpisodeKeyword").require("ShowName").build()
        self.register_intent(get_latest_episode_intent,
                             self.handle_get_latest_episode_intent)

        stop_latest_episode_intent = IntentBuilder(
            "StopAngryBeanieIntent").require("AngryBeanieStopVerb").build()
        self.register_intent(stop_latest_episode_intent, self.handle_stop)

        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_get_podcasts_intent(self, message):
        self.speak_dialog("podcasts")

    def handle_get_episodes_intent(self, message):
        show = message.data.get("ShowName")
        episodes = getEpisodes(show.encode('utf-8'))
        self.speak_dialog("episodes", {'show': show.encode('utf-8'),
                                       'episodes': episodes.encode('utf-8')})

    def handle_get_latest_episode_intent(self, message):
        show = message.data.get("ShowName")
        episode = getLatestEpisode(show)
        self.speak("Playing episode")
        # If the audio service module is available, use it ...
        if self.audioservice:
            self.audioservice.play(episode, message.data['utterance'])
        else:
            # ... otherwise fall back to normal mp3 playback.
            self.process = play_mp3(episode)

    def handle_stop(self, message):
        self.stop()

    def stop(self):
        # BUG FIX: the original only terminated the fallback mp3 process;
        # playback started through the audio service could not be stopped.
        if self.audioservice:
            self.audioservice.stop()
        if self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
        self.speak_dialog('angrybeanie.stop.playing')
class Emby(MycroftSkill):
    """Plays an instant mix for requested media from an Emby server."""

    def __init__(self):
        MycroftSkill.__init__(self)
        self.audio_service = None
        self.emby_croft = None

    def initialize(self):
        """Connect to the Emby server using the configured credentials."""
        try:
            self.emby_croft = EmbyCroft(
                self.settings["hostname"] + ":" + str(self.settings["port"]),
                self.settings["username"], self.settings["password"])
        except Exception as e:
            self.log.log(20, e)
            self.speak('Failed to connect to Emby. Please check your'
                       ' configuration at Mycroft.ai')

    @intent_file_handler('emby.intent')
    def handle_emby(self, message):
        """Look up an instant mix for the requested media and play it."""
        self.log.log(20, message.data)
        media = message.data['media']

        # Set up the audio service for this request.
        self.audio_service = AudioService(self.bus)
        songs = []
        try:
            songs = self.emby_croft.instant_mix_for_media(media)
            self.audio_service.play(songs)
            self.speak_playing(media)
        except Exception as e:
            self.log.log(20, e)
            self.speak("Unable to find or play " + media +
                       ". Please try again")

    def speak_playing(self, media):
        """Announce which media is being played."""
        self.speak_dialog('emby', {'media': media})

    def stop(self):
        pass
class TestAudioServicePlay(TestCase):
    """Unit tests for the messages AudioService.play emits on the bus."""

    def setUp(self):
        self.bus = mock.Mock(name='bus')
        self.audioservice = AudioService(self.bus)

    def _emitted(self):
        # Most recent message emitted on the mocked bus.
        return self.bus.emit.call_args_list[-1][0][0]

    def test_proper_uri(self):
        self.audioservice.play('file:///hello_nasty.mp3')
        message = self._emitted()
        self.assertEqual(message.msg_type, 'mycroft.audio.service.play')
        self.assertEqual(message.data['tracks'], ['file:///hello_nasty.mp3'])
        self.assertEqual(message.data['repeat'], False)

    def test_path(self):
        self.audioservice.play('/hello_nasty.mp3')
        message = self._emitted()
        self.assertEqual(message.msg_type, 'mycroft.audio.service.play')
        self.assertEqual(message.data['tracks'], ['file:///hello_nasty.mp3'])
        self.assertEqual(message.data['repeat'], False)

    def test_tuple(self):
        """Test path together with mimetype."""
        self.audioservice.play(('/hello_nasty.mp3', 'audio/mp3'))
        message = self._emitted()
        self.assertEqual(message.msg_type, 'mycroft.audio.service.play')
        self.assertEqual(message.data['tracks'],
                         [('file:///hello_nasty.mp3', 'audio/mp3')])
        self.assertEqual(message.data['repeat'], False)

    def test_invalid(self):
        """Test play request with invalid type."""
        with self.assertRaises(ValueError):
            self.audioservice.play(12)

    def test_extra_arguments(self):
        """Test sending along utterance and setting repeat."""
        self.audioservice.play('/hello_nasty.mp3', 'on vlc', True)
        message = self._emitted()
        self.assertEqual(message.msg_type, 'mycroft.audio.service.play')
        self.assertEqual(message.data['tracks'], ['file:///hello_nasty.mp3'])
        self.assertEqual(message.data['repeat'], True)
        self.assertEqual(message.data['utterance'], 'on vlc')
def make_sound(self, note):
    """Synthesize a 2-second cosine tone for `note` and play it.

    Writes a temporary mono 16-bit wav to /tmp, hands it to the audio
    service, blocks until playback should be done, then removes the file.
    """
    sample_rate = 48000.0  # hertz
    duration = 2.0  # seconds
    frequency = self.NOTES[note]
    wavef = wave.open('/tmp/sound.wav', 'w')
    wavef.setnchannels(1)  # mono
    wavef.setsampwidth(2)  # 16-bit samples
    wavef.setframerate(sample_rate)
    n_frames = int(duration * sample_rate)
    for frame in range(n_frames):
        # Scale the cosine to the full signed 16-bit range.
        sample = int(32767.0 * math.cos(
            frequency * math.pi * float(frame) / float(sample_rate)))
        wavef.writeframesraw(struct.pack('<h', sample))
    wavef.close()
    AudioService.play(self, tracks='file:///tmp/sound.wav')
    time.sleep(duration)  # block until the tone has finished playing
    os.remove('/tmp/sound.wav')
class PlaySomeMusicSkill(MycroftSkill):
    """Streams a user-configured radio station URL on request."""

    def __init__(self):
        super(PlaySomeMusicSkill, self).__init__(name="PlaySomeMusicSkill")
        self.audioservice = None
        self.process = None

    def initialize(self):
        play_intent = (IntentBuilder("PlaySomeMusicIntent")
                       .require("PlaySomeMusicKeyword")
                       .build())
        self.register_intent(play_intent, self.handle_intent)

        stop_intent = (IntentBuilder("PlaySomeMusicStopIntent")
                       .require("PlaySomeMusicStopVerb")
                       .require("PlaySomeMusicKeyword")
                       .build())
        self.register_intent(stop_intent, self.handle_stop)

        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_intent(self, message):
        """Stop current playback, announce, then start the stream."""
        self.stop()
        self.speak_dialog('play.some.music')
        time.sleep(4)  # let the announcement finish first
        if self.audioservice:
            self.audioservice.play(self.settings['station_url'])
        else:
            # Fall back to spawning a local mp3 player process.
            self.process = play_mp3(self.settings['station_url'])

    def handle_stop(self, message):
        self.stop()
        self.speak_dialog('play.some.music.stop')

    def stop(self):
        if self.audioservice:
            self.audioservice.stop()
        elif self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
class ChickenAssistant(MycroftSkill):
    """Plays a chicken sound and animates a servo-driven chicken."""

    def __init__(self):
        MycroftSkill.__init__(self)
        self.process = None
        # Bundled chicken responses shipped with the skill.
        self.play_list = {
            0: join(dirname(__file__), "chicken_response_01.mp3"),
            1: join(dirname(__file__), "chicken_response_02.mp3")
        }

    def initialize(self):
        self.audio_service = AudioService(self.bus)
        #self.add_event("mycroft.sing", self.sing, False)

    def sing(self, message):
        # BUG FIX: the attribute is audio_service (with underscore); the
        # original referenced self.audioservice and raised AttributeError.
        self.audio_service.play(self.play_list[0])

    @intent_handler('assistant.chicken.intent')
    def handle_assistant_chicken(self, message):
        """Play a random chicken sound and run the servo animation."""
        # BUG FIX: pick explicitly from the dict's values rather than
        # relying on random.choice() indexing the dict by 0..n-1 keys.
        path = random.choice(list(self.play_list.values()))
        try:
            #self.speak_dialog('assistant.chicken')
            #wait_while_speaking()
            self.audio_service.play(path)
            servo = maestro.Controller('/dev/ttyAMA0')
            servo.runScriptSub(0)
            time.sleep(2)
            servo.runScriptSub(1)
            # BUG FIX: close() was referenced without calling it, so the
            # serial port was never actually closed.
            servo.close()
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    def stop(self):
        if self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
class FirstTestSkill(MycroftSkill):
    """Plays an mp3 from settings, falling back to a hard-coded demo file."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        self.setting_mp3_path = self.settings.get('path_for_mp3')
        self.audio_service = AudioService(self.bus)

    @intent_file_handler('test.first.intent')
    def handle_test_first(self, message):
        """Report the configured path (or its absence) and play a file."""
        if self.setting_mp3_path is None:
            # No configured path: announce that and play the demo track.
            self.speak("Settings for path_for_mp3 not found")
            self.audio_service.play("file:///home/jsauwen/Musik/01 Mars.mp3")
        else:
            self.speak("Settings is set to " + str(self.setting_mp3_path))
            self.audio_service.play("file://" + str(self.setting_mp3_path))
        self.speak_dialog('test.first')
class Meowcroft(MycroftSkill):
    """Plays a series of cat meow sound files."""

    def __init__(self):
        MycroftSkill.__init__(self)

    def initialize(self):
        self.audioservice = AudioService(self.bus)

    @intent_file_handler('meowcroft.intent')
    def handle_meowcroft(self, message):
        """Queue all six meows for sequential playback."""
        # BUG FIX: each AudioService.play() call starts a new playback
        # request that supersedes the previous one, so of the six
        # back-to-back calls only the last meow was reliably audible.
        # Queue all tracks as one playlist in a single call instead.
        base = ('file:///opt/mycroft/skills/'
                'meowcroft-skill.hotcakessanderson/catmeow{}.mp3')
        self.audioservice.play([base.format(n) for n in range(1, 7)])
def play(self, tracks=None, utterance=''):
    """Play `tracks`, defaulting the utterance to the preferred backend.

    When no utterance is supplied, self.backend is passed instead so the
    audio service routes playback to the configured backend.
    """
    AudioService.play(self, tracks, utterance or self.backend)
class PodcastSkill(MycroftSkill):
    """Plays the latest episode of up to three user-configured podcasts and
    reports whether any of them have published new episodes."""

    # The constructor of the skill, which calls MycroftSkill's constructor
    def __init__(self):
        super(PodcastSkill, self).__init__(name="PodcastSkill")
        self.process = None
        self.audioservice = None
        # Feed URL of the podcast the user asked for, set per request.
        self.listen_url = ""

    def initialize(self):
        play_podcast_intent = IntentBuilder("PlayPodcastIntent").require(
            "PlayPodcastKeyword").build()
        self.register_intent(play_podcast_intent,
                             self.handle_play_podcast_intent)
        new_episode_intent = IntentBuilder("NewEpisodeIntent").require(
            "NewEpisodeKeyword").build()
        self.register_intent(new_episode_intent,
                             self.handle_new_episode_intent)
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_play_podcast_intent(self, message):
        """Match a configured podcast name in the utterance and stream
        that podcast's most recent episode."""
        utter = message.data['utterance']
        # Names and feed URLs come as parallel triples from the settings.
        podcast_names = [
            self.settings["nameone"],
            self.settings["nametwo"],
            self.settings["namethree"]
        ]
        podcast_urls = [
            self.settings["feedone"],
            self.settings["feedtwo"],
            self.settings["feedthree"]
        ]
        self.listen_url = ""
        for i in range(0, len(podcast_names)):
            if podcast_names[i] == "":
                continue
            if podcast_names[i] in utter:
                self.listen_url = podcast_urls[i]
        # Bail out if Mycroft could not hear the name of any podcast.
        if self.listen_url == "":
            self.speak_dialog('nomatch')
            return False
        self.speak_dialog('latest')
        time.sleep(3)
        #data['entries'][0]['links'][0]['href']
        # Parse the feed URL.
        data = feedparser.parse(self.listen_url)
        # NOTE(review): assumes the feed provides media_content entries;
        # feeds without media enclosures would raise KeyError here.
        episode = (data["entries"][0]["media_content"][0]["url"])
        episode_title = (data['entries'][0]['title'])
        # If the audio service module is available, use it.
        if self.audioservice:
            self.audioservice.play(episode, message.data['utterance'])
            self.enclosure.mouth_text(episode_title)

    def handle_new_episode_intent(self, message):
        """Compare each feed's newest title against the last recorded one
        and announce which podcasts have new episodes."""
        utter = message.data['utterance']
        # Cache of the episode titles seen at the previous check.
        json_path = join(self._dir, "latest_check.json")
        with open(json_path, 'r') as read_file:
            last_check = json.load(read_file)
        podcast_names = [
            self.settings["nameone"],
            self.settings["nametwo"],
            self.settings["namethree"]
        ]
        podcast_urls = [
            self.settings["feedone"],
            self.settings["feedtwo"],
            self.settings["feedthree"]
        ]
        # Check if there are new episodes compared to the last check.
        new_episodes = []
        for i in range(0, len(podcast_urls)):
            if not podcast_urls[i]:
                continue
            data = feedparser.parse(podcast_urls[i])
            last_episode = (data['entries'][0]['title'])
            if last_check["latest_episodes"][i] != last_episode:
                last_check["latest_episodes"][i] = last_episode
                new_episodes.append(i)
        # If the new episode list is empty, there are no new episodes.
        if len(new_episodes) == 0:
            speech_string = "There are no new episodes of your favourite podcasts"
        else:
            # Create the string for mycroft to say.
            speech_string = "There are new episodes of "
            for i in range(0, len(new_episodes)):
                # If the podcast is the last in the list (and not the only
                # one), add "and" before the podcast name.
                if i == (len(new_episodes) - 1) and i > 0:
                    speech_string = speech_string + "and " + podcast_names[
                        new_episodes[i]] + " "
                else:
                    speech_string = speech_string + podcast_names[
                        new_episodes[i]] + ", "
        # Update the latest-check file with the titles just seen.
        with open(join(self._dir, "latest_check.json"), 'w') as write_file:
            json.dump(last_check, write_file)
        self.speak(speech_string)

    def stop(self):
        pass
class CommonPlaySkill(MycroftSkill, ABC):
    """ To integrate with the common play infrastructure of Mycroft
    skills should use this base class and override the two methods
    `CPS_match_query_phrase` (for checking if the skill can play the
    utterance) and `CPS_start` for launching the media.

    The class makes the skill available to queries from the
    mycroft-playback-control skill and no special vocab for starting playback
    is needed.
    """
    def __init__(self, name=None, bus=None):
        super().__init__(name, bus)
        self.audioservice = None
        # Utterance captured at play:start time; later injected into
        # Audioservice.play() so backends can honour modifiers.
        self.play_service_string = None

        # "MusicServiceSkill" -> "Music Service"
        spoken = name or self.__class__.__name__
        self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
                                  spoken.replace("Skill", ""))
        # NOTE: Derived skills will likely want to override self.spoken_name
        # with a translatable name in their initialize() method.

    def bind(self, bus):
        """ Overrides the normal bind method.

        Adds handlers for play:query and play:start messages allowing
        interaction with the playback control skill.

        This is called automatically during setup, and
        need not otherwise be used.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a play:query message with this skill's match confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(
            message.response({
                "phrase": search_phrase,
                "skill_id": self.skill_id,
                "searching": True
            }))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(
                message.response({
                    "phrase": search_phrase,
                    "skill_id": self.skill_id,
                    "callback_data": callback,
                    "service_name": self.spoken_name,
                    "conf": confidence
                }))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(
                message.response({
                    "phrase": search_phrase,
                    "skill_id": self.skill_id,
                    "searching": False
                }))

    def __calc_confidence(self, match, phrase, level):
        """Translate a CPSMatchLevel plus word coverage into 0.0-1.0."""
        # Examples of the kind of phrases being scored:
        # "play pandora"
        # "play pandora is my girlfriend"
        # "play tom waits on pandora"

        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0 / consumed_pct  # deal with over/under-matching

        # We'll use this to modify the level, but don't want it to allow a
        # match to jump to the next match level.  So bonus is 0 - 0.05 (1/20)
        bonus = consumed_pct / 20.0

        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + bonus
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + bonus
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + bonus
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + bonus
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + bonus
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Begin playback if the playback-control skill selected this skill."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """ Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        """Stop the audio service; return True if something was playing."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """ Analyze phrase to see if it is a play-able phrase with this
        skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """ Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass
class CommonPlaySkill(MycroftSkill, ABC):
    """Base class integrating a skill with the common-play infrastructure.

    Derived skills override `CPS_match_query_phrase` (can we play this?)
    and `CPS_start` (start playing it); this class handles the play:query
    / play:start bus protocol with the playback-control skill.
    """

    def __init__(self, name=None, bus=None):
        super().__init__(name, bus)
        self.audioservice = None
        # Utterance captured at play:start time; later injected into
        # Audioservice.play() so backends can honour modifiers.
        self.play_service_string = None

    def bind(self, bus):
        """Override bind to also register the common-play event handlers."""
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a play:query message with this skill's match confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(message.response({"phrase": search_phrase,
                                        "skill_id": self.skill_id,
                                        "searching": True}))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "callback_data": callback,
                                            "conf": confidence}))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "searching": False}))

    def __calc_confidence(self, match, phrase, level):
        """Translate a CPSMatchLevel plus word coverage into 0.0-1.0."""
        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0
        # NOTE(review): a bonus of consumed_pct/10 can reach 0.1, which lets
        # a full-coverage match jump to the next match level (e.g. TITLE at
        # 0.8 + 0.1 == ARTIST+bonus territory).  The newer implementation of
        # this class divides by 20 to cap the bonus at 0.05.

        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + (consumed_pct / 10)
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + (consumed_pct / 10)
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + (consumed_pct / 10)
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + (consumed_pct / 10)
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + (consumed_pct / 10)
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Begin playback if the playback-control skill selected this skill."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """ Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        """Stop the audio service; return True if something was playing."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """ Analyze phrase to see if it is a play-able phrase with this
        skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """ Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass
class FartingSkill(MycroftSkill):
    """Plays fart sound effects on demand or at random intervals."""

    def __init__(self):
        super(FartingSkill, self).__init__(name="FartingSkill")
        self.audioservice = None
        # flag to indicate whether random farting mode is active
        self.random_farting = False
        # variable to increment to make the scheduled event unique
        self.counter = 0

        # Search the sounds directory for sound files and load into a list.
        valid_codecs = ['.mp3']  # , '.wav']
        self.path_to_sound_files = path.join(abspath(dirname(__file__)),
                                             'sounds')
        self.sound_files = [
            f for f in listdir(self.path_to_sound_files)
            if splitext(f)[1] in valid_codecs
        ]

        # cater for the picroft platform which behaves a bit differently
        # from the mark1
        self.platform = "unknown"
        config = Configuration.get([SYSTEM_CONFIG, USER_CONFIG], cache=False)
        if "enclosure" in config:
            self.platform = config.get("enclosure").get("platform", "unknown")

    def initialize(self):
        self.register_intent_file('accuse.intent', self.handle_accuse_intent)
        self.register_intent_file('request.intent', self.handle_request_intent)
        self.register_intent_file('random.intent', self.handle_random_intent)
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def handle_request_intent(self, message):
        # play a randomly selected sound file
        self.fart_and_comment()

    def handle_fart_event(self, message):
        # create a scheduled event to fart at a random interval between
        # 1 minute and half an hour
        LOGGER.info("Farting skill: Handling fart event")
        if not self.random_farting:
            return
        # self.remove_event('random_fart')  # not currently working -
        # using cancel_scheduled_event() instead
        # BUGFIX: the event name was misspelled 'randon_fart', so the
        # previously scheduled event was never actually cancelled.
        self.cancel_scheduled_event('random_fart' + str(self.counter))
        self.counter += 1
        self.schedule_event(
            self.handle_fart_event,
            datetime.now() + timedelta(seconds=random.randrange(60, 1800)),
            name='random_fart' + str(self.counter))
        self.fart_and_comment()

    def handle_accuse_intent(self, message):
        # make a comment when accused of farting
        self.speak_dialog('apologise')

    def handle_random_intent(self, message):
        # initiate random farting
        LOGGER.info("Farting skill: Triggering random farting")
        self.speak("got it")
        time.sleep(.5)
        self.speak("don't worry, I'll be very discrete")
        self.random_farting = True
        self.schedule_event(
            self.handle_fart_event,
            datetime.now() + timedelta(seconds=random.randrange(30, 60)),
            name='random_fart' + str(self.counter))

    def fart_and_comment(self):
        # play a randomly selected fart noise and make a comment
        LOGGER.info("Farting skill: Fart and comment")
        # BUGFIX: pick the sound file ONCE so the duration read by TinyTag
        # matches the file actually played (previously two independent
        # random choices were made, so the sleep below could be wrong).
        sound_file = path.join(self.path_to_sound_files,
                               random.choice(self.sound_files))
        tag = TinyTag.get(sound_file)
        self.audioservice.play(sound_file)
        LOGGER.info("Fart duration " + str(int(tag.duration)))
        delay = 1
        # treat the picroft platform a bit differently
        if self.platform == 'picroft':
            delay = 6
        # wait for playback to finish before speaking the comment
        time.sleep(int(tag.duration) + delay)
        self.speak_dialog('noise')

    @intent_handler(
        IntentBuilder('halt_farting').require('halt').require('farting'))
    def halt_farting(self, message):
        # stop farting
        LOGGER.info("Farting skill: Stopping")
        # if in random fart mode, cancel the scheduled event
        if self.random_farting:
            LOGGER.info("Farting skill: Stopping random farting event")
            self.speak_dialog('cancel')
            self.audioservice.stop()
            self.random_farting = False
            # self.remove_event('random_fart')  # not currently working -
            # using cancel_scheduled_event() instead
            self.cancel_scheduled_event('random_fart' + str(self.counter))

    def stop(self):
        pass
class WhitenoiseSkill(MycroftSkill):
    """Plays white noise recordings, optionally for a requested duration."""

    def __init__(self):
        super(WhitenoiseSkill, self).__init__(name="WhitenoiseSkill")
        self.process = None
        self.start_time = 0
        self.last_index = 24  # index of last pixel in countdowns
        self.settings["duration"] = -1  # default = unknown
        self.play_list_all = {
            0: join(dirname(__file__), "popey-whitenoise.mp3"),
            1: join(dirname(__file__), "popey-whitenoiseocean.mp3"),
            2: join(dirname(__file__), "popey-whitenoiserain.mp3"),
            3: join(dirname(__file__), "popey-whitenoisewave.mp3"),
        }
        self.play_list_ocean = {
            0: join(dirname(__file__), "popey-whitenoiseocean.mp3"),
        }
        self.play_list_rain = {
            0: join(dirname(__file__), "popey-whitenoiserain.mp3"),
        }
        self.play_list_wave = {
            0: join(dirname(__file__), "popey-whitenoisewave.mp3"),
        }

    def initialize(self):
        self.audioservice = AudioService(self.bus)
        self.add_event("mycroft.whitenoise", self.whitenoise, False)

    def whitenoise(self, message):
        # Bus-event entry point: always plays the plain white noise file.
        self.process = play_mp3(self.play_list_all[0])

    @staticmethod
    def stop_process(process):
        """Terminate a playback subprocess.

        Returns True if the process was still running, False otherwise.
        """
        if process.poll() is None:  # None means still running
            process.terminate()
            # No good reason to wait, plus it interferes with
            # how stop button on the Mark 1 operates.
            # process.wait()
            return True
        else:
            return False

    # Show a countdown using the eyes
    def render_countdown(self, r_fore, g_fore, b_fore):
        display_owner = self.enclosure.display_manager.get_active()
        if display_owner == "":
            # Initialization, first time we take ownership
            self.enclosure.mouth_reset()  # clear any leftover bits
            self.enclosure.eyes_color(r_fore, g_fore, b_fore)  # foreground
            self.last_index = 24

        # NOTE(review): this branch and self.remaining_time() look copied
        # from AudioRecordSkill; remaining_time is not defined in this
        # class -- confirm against the rest of the file.
        if display_owner == "AudioRecordSkill":
            remaining_pct = self.remaining_time() / self.settings["duration"]
            fill_to_index = int(24 * remaining_pct)
            while self.last_index > fill_to_index:
                if self.last_index < 24 and self.last_index > -1:
                    # fill background with gray
                    self.enclosure.eyes_setpixel(self.last_index, 64, 64, 64)
                self.last_index -= 1

    @intent_handler(IntentBuilder('').require('Whitenoise'))
    def handle_whitenoise(self, message):
        # BUGFIX: this previously read random.choice(self.play_list), but no
        # `play_list` attribute exists (only play_list_all/_ocean/_rain/_wave),
        # so the intent always raised AttributeError.  Also choose from the
        # dict's values explicitly rather than relying on random.choice
        # indexing a dict by its accidental 0..n-1 integer keys.
        path = random.choice(list(self.play_list_all.values()))
        try:
            self.speak_dialog('whitenoise.response')
            wait_while_speaking()
            self.audioservice.play(path)
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    @intent_handler(IntentBuilder('').require('whitenoise.time.intent'))
    def handle_whitenoise_time(self, message):
        utterance = message.data.get('utterance')

        # Calculate how long to play
        self.start_time = now_local()
        stop_time, _ = extract_datetime(utterance, lang=self.lang)
        self.settings["duration"] = (stop_time -
                                     self.start_time).total_seconds()
        if self.settings["duration"] <= 0:
            self.settings["duration"] = 60  # default recording duration

        # Initiate white noise (explicit value choice, see handle_whitenoise)
        path = random.choice(list(self.play_list_all.values()))
        try:
            # NOTE(review): nice_duration is called with `self` as first
            # argument -- presumably a local helper, not
            # mycroft.util.format.nice_duration; confirm.
            time_for = nice_duration(self, self.settings["duration"],
                                     lang=self.lang)
            self.speak_dialog('whitenoise.response.time',
                              {'duration': time_for})
            wait_while_speaking()
            self.audioservice.play(path)
            # self.process = play_mp3(self.play_list_all[0])
            self.enclosure.eyes_color(255, 0, 0)  # set color red
            self.last_index = 24
            self.schedule_repeating_event(self.recording_feedback, None, 1,
                                          name='RecordingFeedback')
        except Exception as e:
            self.log.error("Error: {0}".format(e))

    def recording_feedback(self, message):
        if not self.process:
            self.end_whitenoise()
            return
        # Show countdown
        self.render_countdown(255, 0, 0)

    def end_whitenoise(self):
        if self.process:
            # Stop playback
            self.stop_process(self.process)
            self.process = None
            # Calc actual duration
            self.settings["duration"] = (now_local() -
                                         self.start_time).total_seconds()

    def stop(self):
        if self.process and self.process.poll() is None:
            self.speak_dialog('whitenoise.stop')
            self.process.terminate()
            self.process.wait()
class CommonPlaySkill(MycroftSkill, ABC):
    """ To integrate with the common play infrastructure of Mycroft
    skills should use this base class and override the two methods
    `CPS_match_query_phrase` (for checking if the skill can play the
    utterance) and `CPS_start` for launching the media.

    The class makes the skill available to queries from the
    mycroft-playback-control skill and no special vocab for starting playback
    is needed.
    """
    def __init__(self, name=None, bus=None):
        super().__init__(name, bus)
        self.audioservice = None
        # The raw "play ..." phrase; stashed so CPS_play() can forward it
        # to the audio backend as the 'utterance' kwarg.
        self.play_service_string = None

        # "MusicServiceSkill" -> "Music Service"
        spoken = name or self.__class__.__name__
        self.spoken_name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>",
                                  spoken.replace("Skill", ""))
        # NOTE: Derived skills will likely want to override self.spoken_name
        # with a translatable name in their initialize() method.

    def bind(self, bus):
        """ Overrides the normal bind method.

        Adds handlers for play:query and play:start messages allowing
        interaction with the playback control skill.

        This is called automatically during setup, and
        need not otherwise be used.
        """
        if bus:
            super().bind(bus)
            self.audioservice = AudioService(self.bus)
            self.add_event('play:query', self.__handle_play_query)
            self.add_event('play:start', self.__handle_play_start)

    def __handle_play_query(self, message):
        """Answer a play:query by reporting whether this skill can handle
        the phrase, and with what confidence."""
        search_phrase = message.data["phrase"]

        # First, notify the requestor that we are attempting to handle
        # (this extends a timeout while this skill looks for a match)
        self.bus.emit(message.response({"phrase": search_phrase,
                                        "skill_id": self.skill_id,
                                        "searching": True}))

        # Now invoke the CPS handler to let the skill perform its search
        result = self.CPS_match_query_phrase(search_phrase)

        if result:
            # result is (match, level[, callback_data])
            match = result[0]
            level = result[1]
            callback = result[2] if len(result) > 2 else None
            confidence = self.__calc_confidence(match, search_phrase, level)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "callback_data": callback,
                                            "service_name": self.spoken_name,
                                            "conf": confidence}))
        else:
            # Signal we are done (can't handle it)
            self.bus.emit(message.response({"phrase": search_phrase,
                                            "skill_id": self.skill_id,
                                            "searching": False}))

    def __calc_confidence(self, match, phrase, level):
        """Translate a CPSMatchLevel plus word-coverage into a 0.0-1.0
        confidence score."""
        # "play pandora"
        # "play pandora is my girlfriend"
        # "play tom waits on pandora"

        # Assume the more of the words that get consumed, the better the match
        consumed_pct = len(match.split()) / len(phrase.split())
        if consumed_pct > 1.0:
            consumed_pct = 1.0 / consumed_pct  # deal with over/under-matching

        # We'll use this to modify the level, but don't want it to allow a
        # match to jump to the next match level.  So bonus is 0 - 0.05 (1/20)
        bonus = consumed_pct / 20.0

        if level == CPSMatchLevel.EXACT:
            return 1.0
        elif level == CPSMatchLevel.MULTI_KEY:
            return 0.9 + bonus
        elif level == CPSMatchLevel.TITLE:
            return 0.8 + bonus
        elif level == CPSMatchLevel.ARTIST:
            return 0.7 + bonus
        elif level == CPSMatchLevel.CATEGORY:
            return 0.6 + bonus
        elif level == CPSMatchLevel.GENERIC:
            return 0.5 + bonus
        else:
            return 0.0  # should never happen

    def __handle_play_start(self, message):
        """Bus handler invoked when this skill has been selected to play."""
        if message.data["skill_id"] != self.skill_id:
            # Not for this skill!
            return
        phrase = message.data["phrase"]
        data = message.data.get("callback_data")

        # Stop any currently playing audio
        if self.audioservice.is_playing:
            self.audioservice.stop()
        self.bus.emit(Message("mycroft.stop"))

        # Save for CPS_play() later, e.g. if phrase includes modifiers like
        # "... on the chromecast"
        self.play_service_string = phrase

        # Invoke derived class to provide playback data
        self.CPS_start(phrase, data)

    def CPS_play(self, *args, **kwargs):
        """ Begin playback of a media file or stream

        Normally this method will be invoked with something like:
           self.CPS_play(url)
        Advanced use can also include keyword arguments, such as:
           self.CPS_play(url, repeat=True)

        Args:
            same as the Audioservice.play method
        """
        # Inject the user's utterance in case the audio backend wants to
        # interpret it.  E.g. "play some rock at full volume on the stereo"
        if 'utterance' not in kwargs:
            kwargs['utterance'] = self.play_service_string
        self.audioservice.play(*args, **kwargs)

    def stop(self):
        """Stop anything currently playing; True if something was stopped."""
        if self.audioservice.is_playing:
            self.audioservice.stop()
            return True
        else:
            return False

    ######################################################################
    # Abstract methods
    # All of the following must be implemented by a skill that wants to
    # act as a CommonPlay Skill
    @abstractmethod
    def CPS_match_query_phrase(self, phrase):
        """ Analyze phrase to see if it is a play-able phrase with this
        skill.

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"

        Returns:
            (match, CPSMatchLevel[, callback_data]) or None: Tuple containing
                 a string with the appropriate matching phrase, the PlayMatch
                 type, and optionally data to return in the callback if the
                 match is selected.
        """
        # Derived classes must implement this, e.g.
        #
        # if phrase in ["Zoosh"]:
        #     return ("Zoosh", CPSMatchLevel.Generic, {"hint": "music"})
        # or:
        # zoosh_song = find_zoosh(phrase)
        # if zoosh_song and "Zoosh" in phrase:
        #     # "play Happy Birthday in Zoosh"
        #     return ("Zoosh", CPSMatchLevel.MULTI_KEY, {"song": zoosh_song})
        # elif zoosh_song:
        #     # "play Happy Birthday"
        #     return ("Zoosh", CPSMatchLevel.TITLE, {"song": zoosh_song})
        # elif "Zoosh" in phrase
        #     # "play Zoosh"
        #     return ("Zoosh", CPSMatchLevel.GENERIC, {"cmd": "random"})
        return None

    @abstractmethod
    def CPS_start(self, phrase, data):
        """ Begin playing whatever is specified in 'phrase'

        Args:
            phrase (str): User phrase uttered after "Play", e.g.
                          "some music"
            data (dict): Callback data specified in match_query_phrase()
        """
        # Derived classes must implement this, e.g.
        # self.CPS_play("http://zoosh.com/stream_music")
        pass
class PodcastSkill(MycroftSkill):
    """Plays podcast episodes from up to three user-configured RSS feeds."""

    # The constructor of the skill, which calls MycroftSkill's constructor
    def __init__(self):
        super(PodcastSkill, self).__init__(name="PodcastSkill")
        self.process = None
        self.audioservice = None

    def initialize(self):
        play_podcast_intent = IntentBuilder("PlayPodcastIntent").require(
            "PlayPodcastKeyword").build()
        self.register_intent(play_podcast_intent,
                             self.handle_play_podcast_intent)

        latest_episode_intent = IntentBuilder("LatestEpisodeIntent").require(
            "LatestEpisodeKeyword").build()
        self.register_intent(latest_episode_intent,
                             self.handle_latest_episode_intent)

        if AudioService:
            self.audioservice = AudioService(self.emitter)

    def chosen_podcast(self, utter, podcast_names, podcast_urls):
        """Return the feed URL whose configured name appears in utter,
        or "" when nothing matches."""
        listen_url = ""
        for i in range(0, len(podcast_names)):
            # check for empty podcast settings
            if podcast_names[i] == "":
                continue
            try:
                if podcast_names[i].lower() in utter.lower():
                    listen_url = podcast_urls[i]
            except Exception:
                # utter may be None (e.g. get_response timed out)
                pass
        return listen_url

    def handle_play_podcast_intent(self, message):
        utter = message.data['utterance']

        podcast_names = [
            self.settings["nameone"], self.settings["nametwo"],
            self.settings["namethree"]
        ]
        podcast_urls = [
            self.settings["feedone"], self.settings["feedtwo"],
            self.settings["feedthree"]
        ]

        listen_url = self.chosen_podcast(utter, podcast_names, podcast_urls)

        # if misheard, retry and return false if Mycroft could not hear the
        # name of the podcast
        try_count = 0
        while (listen_url == "" and try_count < 2):
            try_count += 1
            response = self.get_response('nomatch')
            listen_url = self.chosen_podcast(response, podcast_names,
                                             podcast_urls)
            if try_count == 1 and listen_url == "":
                self.speak_dialog('not.found')
                return False

        # normalise feed and parse it
        normalised_feed = pp.normalize_feed_url(listen_url)
        parsed_feed = pp.parse(normalised_feed,
                               urllib.urlopen(normalised_feed))

        # Check what episode the user wants
        episode_index = 0

        # This block adds functionality for the user to choose an episode
        while (True):
            episode_title = parsed_feed['episodes'][episode_index]['title']
            podcast_title = parsed_feed['title']

            data_dict = {
                "podcast_title": podcast_title,
                "episode_title": episode_title
            }

            if episode_index == 0:
                response = self.get_response('play.previous',
                                             data=data_dict,
                                             on_fail='please.repeat')
            else:
                response = self.get_response('play.next.previous',
                                             data=data_dict,
                                             on_fail='please.repeat')

            # error check
            if response is None:
                break

            if "stop" in response:
                self.speak("Operation cancelled.")
                return False
            elif "play" in response:
                break
            elif "previous" in response:
                # feeds list newest first, so "previous" moves up the index
                episode_index += 1
            elif "next" in response:
                # ensure index doesnt go below zero
                if episode_index != 0:
                    episode_index -= 1

        self.speak("Playing podcast.")
        time.sleep(1)

        # some feeds have different formats; bail out politely when the
        # expected enclosure layout is missing
        try:
            episode = (
                parsed_feed["episodes"][episode_index]["enclosures"][0]["url"])
        except Exception:
            # BUGFIX: previously execution fell through here with `episode`
            # unbound, causing a NameError right after the dialog.
            self.speak_dialog('badrss')
            return False

        # check for any redirects
        episode = urllib.urlopen(episode)
        redirected_episode = episode.geturl()

        # if audio service module is available use it
        if self.audioservice:
            self.audioservice.play(redirected_episode,
                                   message.data['utterance'])
        else:
            # otherwise use normal mp3 playback
            self.process = play_mp3(redirected_episode)

        self.enclosure.mouth_text(episode_title)

    def handle_latest_episode_intent(self, message):
        utter = message.data['utterance']

        podcast_names = [
            self.settings["nameone"], self.settings["nametwo"],
            self.settings["namethree"]
        ]
        podcast_urls = [
            self.settings["feedone"], self.settings["feedtwo"],
            self.settings["feedthree"]
        ]

        # check if the user specified a podcast to check for a new podcast
        for i in range(0, len(podcast_names)):
            # skip if podcast slot left empty
            if podcast_names[i] == "":
                continue
            elif podcast_names[i].lower() in utter.lower():
                parsed_feed = pp.parse(podcast_urls[i],
                                       urllib.urlopen(podcast_urls[i]))
                last_episode = (parsed_feed['episodes'][0]['title'])

                speech_string = ("The latest episode of " +
                                 podcast_names[i] + " is " + last_episode)
                self.speak(speech_string)
                return True

        # if no podcast names are provided, list all new episodes
        new_episodes = []
        for i in range(0, len(podcast_urls)):
            if not podcast_urls[i]:
                continue
            parsed_feed = pp.parse(podcast_urls[i],
                                   urllib.urlopen(podcast_urls[i]))
            last_episode = (parsed_feed['episodes'][0]['title'])
            new_episodes.append(last_episode)

        speech_string = "The latest episodes are the following: "
        for i in range(0, len(new_episodes)):
            # if the podcast is the last in a list add "and" before the
            # podcast name
            if i == (len(new_episodes) - 1) and i > 0:
                speech_string = (speech_string + "and " + podcast_names[i] +
                                 ": " + new_episodes[i])
            else:
                speech_string = (speech_string + podcast_names[i] + ": " +
                                 new_episodes[i] + ", ")
        self.speak(speech_string)

    def stop(self):
        pass
class Jellyfin(CommonPlaySkill):
    """Mycroft skill that plays music from a Jellyfin media server."""

    def __init__(self):
        super().__init__()
        self._setup = False
        self.audio_service = None
        self.jellyfin_croft = None
        self.songs = []
        # Stable per-device id derived from the Mycroft device uuid.
        self.device_id = hashlib.md5(
            ('Jellyfin' + DeviceApi().identity.uuid).encode()).hexdigest()

    def CPS_match_query_phrase(self, phrase):
        """ This method responds whether the skill can play the input phrase.

            The method is invoked by the PlayBackControlSkill.

            Returns: tuple (matched phrase(str),
                            match level(CPSMatchLevel),
                            optional data(dict))
                     or None if no match was found.
        """
        # slower devices like raspberry pi's need a bit more time.
        self.CPS_extend_timeout(10)
        # first thing is connect to jellyfin or bail
        if not self.connect_to_jellyfin():
            return None
        self.log.debug("CPS Phrase: " + phrase)
        match_type, self.songs = \
            self.jellyfin_croft.parse_common_phrase(phrase)

        if match_type and self.songs:
            match_level = None
            if match_type is not None:
                self.log.info('Found match of type: ' + match_type)
                if match_type in ('song', 'album', 'playlist', 'genre'):
                    match_level = CPSMatchLevel.TITLE
                elif match_type == 'artist':
                    match_level = CPSMatchLevel.ARTIST
                self.log.info('match level :' + str(match_level))
            song_data = dict()
            song_data[phrase] = self.songs
            self.log.info("First 3 item urls returned")
            max_songs_to_log = 3
            for song in self.songs[:max_songs_to_log]:
                self.log.debug(song)
            # BUGFIX: the computed match_level was previously discarded and
            # CPSMatchLevel.TITLE was always returned, so artist matches
            # never reported CPSMatchLevel.ARTIST.  Keep TITLE only as the
            # fallback for unrecognized match types.
            return phrase, match_level or CPSMatchLevel.TITLE, song_data
        else:
            return None

    def CPS_start(self, phrase, data):
        """ Starts playback.

            Called by the playback control skill to start playback if the
            skill is selected (has the best match level)
        """
        # setup audio service
        self.audio_service = AudioService(self.bus)
        self.speak_playing(phrase)
        self.audio_service.play(data[phrase])
        self.CPS_send_tracklist(self.jellyfin_croft.get_track_list())

    def connect_to_jellyfin(self, diagnostic=False):
        """
        Attempts to connect to the server based on the config

        if diagnostic is False an attempt to auth is also made
        returns true/false on success/failure respectively

        :return:
        """
        auth_success = False
        self.log.debug("Testing connection to: " + self.settings["hostname"])
        try:
            self.jellyfin_croft = JellyfinCroft(
                self.settings["hostname"] + ":" + str(self.settings["port"]),
                self.settings["username"], self.settings["password"],
                self.device_id, diagnostic)
            auth_success = True
        except Exception as e:
            self.log.info(
                "failed to connect to jellyfin, error: {0}".format(str(e)))
        return auth_success

    def initialize(self):
        pass

    @intent_file_handler('jellyfin.intent')
    def handle_jellyfin(self, message):
        self.log.info(message.data)
        # first thing is connect to jellyfin or bail
        if not self.connect_to_jellyfin():
            self.speak_dialog('configuration_fail')
            return

        # determine intent
        intent, intent_type = JellyfinCroft.determine_intent(message.data)

        self.songs = []
        try:
            self.songs = self.jellyfin_croft.handle_intent(intent,
                                                           intent_type)
        except Exception as e:
            self.log.info(e)
            self.speak_dialog('play_fail', {"media": intent})

        if not self.songs or len(self.songs) < 1:
            self.log.info('No songs Returned')
            self.speak_dialog('play_fail', {"media": intent})
        else:
            # setup audio service and play
            self.audio_service = AudioService(self.bus)
            backends = self.audio_service.available_backends()
            self.log.debug("BACKENDS. VLC Recommended")
            for key, value in backends.items():
                self.log.debug(str(key) + " : " + str(value))
            self.speak_playing(intent)
            self.audio_service.play(self.songs, message.data['utterance'])

    @intent_file_handler('shuffle.intent')
    def handle_shuffle(self, message):
        self.log.info(message.data)
        # Back up meta data
        track_meta = self.jellyfin_croft.get_all_meta()
        # first thing is connect to jellyfin or bail
        if not self.connect_to_jellyfin():
            self.speak_dialog('configuration_fail')
            return

        if not self.songs or len(self.songs) < 1:
            self.log.info('No songs Returned')
            self.speak_dialog('shuffle_fail')
        else:
            self.log.info(track_meta)
            # setup audio service and shuffle play
            shuffle(self.songs)
            self.audio_service = AudioService(self.bus)
            self.speak_dialog('shuffle')
            self.audio_service.play(self.songs, message.data['utterance'])
            # Restore meta data
            self.jellyfin_croft.set_meta(track_meta)

    def speak_playing(self, media):
        data = dict()
        data['media'] = media
        self.speak_dialog('jellyfin', data)

    @intent_file_handler('playingsong.intent')
    def handle_playing(self, message):
        track = "Unknown"
        artist = "Unknown"
        if self.audio_service.is_playing:
            # See if I can get the current track index instead
            track = self.audio_service.track_info()['name']
            artist = self.audio_service.track_info()['artists']
            if artist != [None]:
                self.speak_dialog('whatsplaying',
                                  {'track': track, 'artist': artist})
            else:
                track = self.jellyfin_croft.get_meta(
                    self.audio_service.track_info()['name'])
                if track is not False:
                    self.speak_dialog('whatsplaying',
                                      {'track': track['Name'],
                                       'artist': track['Artists']})
                else:
                    self.speak_dialog('notrackinfo')
        else:
            self.speak_dialog('notplaying')

    @intent_file_handler('playlist.intent')
    def handle_playlist_add(self, message):
        if self.audio_service.is_playing:
            track = self.audio_service.track_info()['name']
            track_name = self.jellyfin_croft.get_meta(track)
            add_to = self.jellyfin_croft.add_to_playlist(
                track, message.data.get('playlist_name'))
            if add_to is True:
                self.speak_dialog(
                    'playlist',
                    {'media': track_name['Name'],
                     'playlist_name': message.data.get('playlist_name')})
                return
            self.speak_dialog(
                'playlist_fail',
                {'media': track_name['Name'],
                 'playlist_name': message.data.get('playlist_name')})
        return

    @intent_file_handler('diagnostic.intent')
    def handle_diagnostic(self, message):
        self.log.info(message.data)
        self.speak_dialog('diag_start')
        # connect to jellyfin for diagnostics
        self.connect_to_jellyfin(diagnostic=True)
        connection_success, info = \
            self.jellyfin_croft.diag_public_server_info()

        if connection_success:
            self.speak_dialog('diag_public_info_success', info)
        else:
            self.speak_dialog('diag_public_info_fail',
                              {'host': self.settings['hostname']})
            self.speak_dialog('general_check_settings_logs')
            self.speak_dialog('diag_stop')
            return

        if not self.connect_to_jellyfin():
            self.speak_dialog('diag_auth_fail')
            self.speak_dialog('diag_stop')
            return
        else:
            self.speak_dialog('diag_auth_success')

        self.speak_dialog('diagnostic')

    def stop(self):
        pass
class TuneinSkill(CommonPlaySkill):
    """Finds and plays internet radio stations via the TuneIn API."""

    def __init__(self):
        super().__init__(name="TuneinSkill")

        self.station_name = None  # display name of the matched station
        self.stream_url = None    # resolved, directly playable stream URL
        self.mpeg_url = None      # audio/x-mpegurl playlist URL from TuneIn
        self.process = None
        self.regexes = {}         # cache of localized regex strings

    def initialize(self):
        pass

    def CPS_match_query_phrase(self, phrase):
        """Respond to common-play queries.

        Looks for regex matches starting from the most specific to the
        least specific pattern; the matched wording is stripped from the
        phrase to leave only the station search term.
        """
        # Play <data> internet radio on tune in
        match = re.search(self.translate_regex('internet_radio_on_tunein'),
                          phrase)
        if match:
            data = re.sub(self.translate_regex('internet_radio_on_tunein'),
                          '', phrase)
            LOG.debug("CPS Match (internet_radio_on_tunein): " + data)
            return phrase, CPSMatchLevel.EXACT, data

        # Play <data> radio on tune in
        match = re.search(self.translate_regex('radio_on_tunein'), phrase)
        if match:
            data = re.sub(self.translate_regex('radio_on_tunein'), '', phrase)
            LOG.debug("CPS Match (radio_on_tunein): " + data)
            return phrase, CPSMatchLevel.EXACT, data

        # Play <data> on tune in
        match = re.search(self.translate_regex('on_tunein'), phrase)
        if match:
            data = re.sub(self.translate_regex('on_tunein'), '', phrase)
            LOG.debug("CPS Match (on_tunein): " + data)
            return phrase, CPSMatchLevel.EXACT, data

        # Play <data> internet radio
        match = re.search(self.translate_regex('internet_radio'), phrase)
        if match:
            data = re.sub(self.translate_regex('internet_radio'), '', phrase)
            LOG.debug("CPS Match (internet_radio): " + data)
            return phrase, CPSMatchLevel.CATEGORY, data

        # Play <data> radio
        match = re.search(self.translate_regex('radio'), phrase)
        if match:
            data = re.sub(self.translate_regex('radio'), '', phrase)
            LOG.debug("CPS Match (radio): " + data)
            return phrase, CPSMatchLevel.CATEGORY, data

        # Fall through: offer the whole phrase as a generic match
        return phrase, CPSMatchLevel.GENERIC, phrase

    def stop(self):
        pass

    def CPS_start(self, phrase, data):
        # 'data' is the search term extracted in CPS_match_query_phrase
        LOG.debug("CPS Start: " + data)
        self.find_station(data)

    @intent_file_handler('StreamRequest.intent')
    def handle_stream_intent(self, message):
        self.find_station(message.data["station"],
                          message.data["utterance"])
        LOG.debug("Station data: " + message.data["station"])

    # Attempt to find the first active station matching the query string
    def find_station(self, search_term, utterance=None):
        payload = {"query": search_term}
        # get the response from the TuneIn API
        # NOTE(review): base_url and headers are presumably module-level
        # globals defined outside this class -- confirm.
        res = requests.post(base_url, data=payload, headers=headers)
        dom = parseString(res.text)
        # results are each in their own <outline> tag as defined by OPML
        # (https://en.wikipedia.org/wiki/OPML)
        entries = dom.getElementsByTagName("outline")

        # Loop through outlines in the lists
        for entry in entries:
            # Only look at outlines that are of type=audio and item=station
            if (entry.getAttribute("type") == "audio") and \
                    (entry.getAttribute("item") == "station"):
                if (entry.getAttribute("key") != "unavailable"):
                    # Ignore entries that are marked as unavailable
                    self.mpeg_url = entry.getAttribute("URL")
                    self.station_name = entry.getAttribute("text")
                    # this URL will return audio/x-mpegurl data. This is just
                    # a list of URLs to the real streams
                    self.stream_url = self.get_stream_url(self.mpeg_url)
                    LOG.debug("Found stream URL: " + self.stream_url)
                    self.audio_service = AudioService(self.bus)
                    self.speak_dialog("now.playing",
                                      {"station": self.station_name})
                    wait_while_speaking()
                    self.audio_service.play(self.stream_url, utterance)
                    return

        # We didn't find any playable stations
        self.speak_dialog("not.found")
        wait_while_speaking()
        LOG.debug("Could not find a station with the query term: " +
                  search_term)

    def get_stream_url(self, mpegurl):
        # Resolve the x-mpegurl playlist down to a single stream URL
        res = requests.get(mpegurl)
        # Get the first line from the results
        for line in res.text.splitlines():
            return self.process_url(line)

    # Check what kind of url was pulled from the x-mpegurl data
    def process_url(self, url):
        if (len(url) > 4):
            if url[-3:] == 'm3u':
                # strip the ".m3u" suffix to get the stream URL
                return url[:-4]
            if url[-3:] == 'pls':
                return self.process_pls(url)
            else:
                return url
        return url

    # Pull down the pls data and pull out the real stream url out of it
    def process_pls(self, url):
        res = requests.get(url)
        # Loop through the data looking for the first url
        for line in res.text.splitlines():
            if line.startswith("File1="):
                # value after the "File1=" prefix is the stream URL
                return line[6:]

    # Get the correct localized regex
    def translate_regex(self, regex):
        # Lazily load and cache the regex string from the skill's resources
        if regex not in self.regexes:
            path = self.find_resource(regex + '.regex')
            if path:
                with open(path) as f:
                    string = f.read().strip()
                self.regexes[regex] = string
        return self.regexes[regex]
class NoAgendaSkill(MycroftSkill):
    """Plays episodes of the No Agenda podcast from its RSS feed.

    Each named intent maps to an episode index in the feed (0 = latest).
    The previous version duplicated the fetch/log/play sequence in every
    handler; the shared helpers below now do the actual work while every
    handler name and intent registration stays unchanged.
    """

    def __init__(self):
        super(NoAgendaSkill, self).__init__(name="NoAgendaSkill")
        self.process = None        # play_mp3 subprocess handle (fallback path)
        self.audioservice = None   # AudioService instance, if available

    def initialize(self):
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    @property
    def url_rss(self):
        """RSS feed URL: the custom URL when the preset is 'not_set',
        otherwise the selected preset, with a config fallback."""
        pre_select = self.settings.get("pre_select", "")
        if "not_set" in pre_select:
            # Use a custom RSS URL
            url_rss = self.settings.get("url_rss")
        else:
            # Use the selected preset's URL
            url_rss = pre_select
        if not url_rss and 'url_rss' in self.config:
            url_rss = self.config['url_rss']
        return url_rss

    def _start_playback(self, url, message):
        """Play `url` via the audio service if present, else as plain mp3."""
        wait_while_speaking()
        if self.audioservice:
            LOG.info('AudioService')
            self.audioservice.play(url, message.data['utterance'])
        else:
            LOG.info('playmp3')
            self.process = play_mp3(url)

    def _play_episode(self, message, index, tag, announce=False):
        """Stop playback, fetch feed entry `index` and play its enclosure.

        announce -- speak the 'NoAgenda' intro dialog and pause briefly
                    before starting the stream
        """
        try:
            self.stop()
            if announce:
                self.speak_dialog('NoAgenda')
            feeddata = feedparser.parse(self.url_rss)
            data = feeddata.entries[index]
            url = data.enclosures[0]['url']
            LOG.info(tag)
            LOG.info(url)
            if announce:
                sleep(1.0)
            self._start_playback(url, message)
        except Exception as e:
            LOG.error("Error: {0}".format(e))

    # --- Named intents, all playing the latest episode with the intro ---

    @intent_handler(
        IntentBuilder("anycollusion").require("anycollusion").build())
    def handle_anycollusion_intent(self, message):
        self._play_episode(message, 0, 'anycollusion', announce=True)

    @intent_handler(
        IntentBuilder("mymillenials").require("mymillenials").build())
    def handle_mymillenials_intent(self, message):
        self._play_episode(message, 0, 'mymillenials', announce=True)

    @intent_handler(IntentBuilder("buildawall").require("buildawall").build())
    def handle_buildawall_intent(self, message):
        self._play_episode(message, 0, 'buildawall', announce=True)

    @intent_handler(
        IntentBuilder("resistwemuch").require("resistwemuch").build())
    def handle_resistwemuch_intent(self, message):
        self._play_episode(message, 0, 'resistwemuch', announce=True)

    @intent_handler(IntentBuilder("needs").require("needs").build())
    def handle_needs_intent(self, message):
        self._play_episode(message, 0, 'needs', announce=True)

    @intent_handler(
        IntentBuilder("inthemorning").require("inthemorning").build())
    def handle_inthemorning_intent(self, message):
        self._play_episode(message, 0, 'inthemorning', announce=True)

    @intent_handler(IntentBuilder("triggered").require("triggered").build())
    def handle_triggered_intent(self, message):
        self._play_episode(message, 0, 'triggered', announce=True)

    # --- Ordinal intents: play the n-th most recent episode, no intro ---

    @intent_handler(
        IntentBuilder("penultimate").optionally("Play").require(
            "penultimate").require("NoAgenda").build())
    def handle_penultimate_intent(self, message):
        self._play_episode(message, 1, 'penultimate')

    @intent_handler(
        IntentBuilder("third").optionally("Play").require("3rd").require(
            "NoAgenda").build())
    def handle_third_intent(self, message):
        self._play_episode(message, 2, 'third')

    @intent_handler(
        IntentBuilder("fourth").optionally("Play").require("4th").require(
            "NoAgenda").build())
    def handle_fourth_intent(self, message):
        self._play_episode(message, 3, 'fourth')

    @intent_handler(
        IntentBuilder("fifth").optionally("Play").require("5th").require(
            "NoAgenda").build())
    def handle_fifth_intent(self, message):
        self._play_episode(message, 4, 'fifth')

    @intent_handler(
        IntentBuilder("sixth").optionally("Play").require("6th").require(
            "NoAgenda").build())
    def handle_sixth_intent(self, message):
        self._play_episode(message, 5, 'sixth')

    @intent_handler(
        IntentBuilder("seventh").optionally("Play").require("7th").require(
            "NoAgenda").build())
    def handle_seventh_intent(self, message):
        self._play_episode(message, 6, 'seventh')

    @intent_handler(
        IntentBuilder("eighth").optionally("Play").require("8th").require(
            "NoAgenda").build())
    def handle_eighth_intent(self, message):
        self._play_episode(message, 7, 'eighth')

    @intent_handler(
        IntentBuilder("ninth").optionally("Play").require("9th").require(
            "NoAgenda").build())
    def handle_ninth_intent(self, message):
        self._play_episode(message, 8, 'ninth')

    @intent_handler(
        IntentBuilder("tenth").optionally("Play").require("10th").require(
            "NoAgenda").build())
    def handle_tenth_intent(self, message):
        self._play_episode(message, 9, 'tenth')

    @intent_handler(
        IntentBuilder("eleventh").optionally("Play").require("11th").require(
            "NoAgenda").build())
    def handle_eleventh_intent(self, message):
        self._play_episode(message, 10, 'eleventh')

    @intent_handler(
        IntentBuilder("twelfth").optionally("Play").require("12th").require(
            "NoAgenda").build())
    def handle_twelfth_intent(self, message):
        self._play_episode(message, 11, 'twelfth')

    @intent_handler(
        IntentBuilder("thirteenth").optionally("Play").require("13th").require(
            "NoAgenda").build())
    def handle_thirteenth_intent(self, message):
        self._play_episode(message, 12, 'thirteenth')

    @intent_handler(
        IntentBuilder("fourteenth").optionally("Play").require("14th").require(
            "NoAgenda").build())
    def handle_fourteenth_intent(self, message):
        self._play_episode(message, 13, 'fourteenth')

    @intent_handler(
        IntentBuilder("fifteenth").optionally("Play").require("15th").require(
            "NoAgenda").build())
    def handle_fifteenth_intent(self, message):
        self._play_episode(message, 14, 'fifteenth')

    @intent_handler(
        IntentBuilder("live").require("Jack").require("Live").optionally(
            "NoAgenda").require("Stream").build())
    def handle_live_intent(self, message):
        """Play the live No Agenda stream (fixed URL, no feed lookup)."""
        try:
            # Stop anything already playing
            self.stop()
            url = 'https://listen.noagendastream.com/noagenda.pls'
            LOG.info('live')
            LOG.info(url)
            self._start_playback(url, message)
        except Exception as e:
            LOG.error("Error: {0}".format(e))

    @intent_handler(
        IntentBuilder("query").require("query").optionally("latest").require(
            "NoAgenda").build())
    def handle_query_intent(self, message):
        self._play_episode(message, 0, 'query')

    def stop(self):
        """Halt audio-service playback or terminate the mp3 subprocess."""
        if self.audioservice:
            self.audioservice.stop()
        elif self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()

    @intent_handler(
        IntentBuilder("random").optionally("Play").optionally(
            "random").require("NoAgenda").build())
    def handle_random_intent(self, message):
        """Play a random episode from the 15 most recent."""
        random_episode = random.randint(0, 14)
        LOG.info(random_episode)
        self._play_episode(message, random_episode, 'random')

    @intent_handler(
        IntentBuilder("latest").optionally("Play").optionally(
            "latest").require("NoAgenda").build())
    def handle_latest_intent(self, message):
        self._play_episode(message, 0, 'latest')
class YoutubeSkill(MycroftSkill):
    """Searches YouTube for a requested title and streams the results.

    Fix: `BeautifulSoup(html)` with no parser argument picks whichever
    parser happens to be installed (and warns); the parser is now pinned
    to the stdlib "html.parser" for deterministic scraping.
    """

    def __init__(self):
        super(YoutubeSkill, self).__init__(name="YoutubeSkill")
        self.audio_service = None
        self.p = None  # cvlc subprocess handle (fallback playback path)

    def initialize(self):
        if AudioService:
            self.audio_service = AudioService(self.emitter)
        self.register_intent_file("youtube.intent",
                                  self.handle_play_song_intent)

    def handle_play_song_intent(self, message):
        """Search YouTube for the requested music and play the matches."""
        # Play the song requested
        title = message.data.get("music")
        # mark 1 hack
        if AudioService:
            self.audio_service.stop()
        self.speak_dialog("searching.youtube", {"music": title})
        url = "https://www.youtube.com/watch?v="
        self.log.info("Searching youtube for " + title)
        # Drop channel/playlist/user results; keep only plain video ids
        videos = [url + v for v in self.search(title)
                  if "channel" not in v and "list" not in v
                  and "user" not in v]
        self.log.info("Youtube Links:" + str(videos))
        # Display icon on faceplate (music code)
        self.enclosure.deactivate_mouth_events()
        self.enclosure.mouth_display("IIAEAOOHGAGEGOOHAA", x=10, y=0,
                                     refresh=True)
        wait_while_speaking()
        if AudioService:
            self.audio_service.stop()
            self.audio_service.play(videos, "vlc")
        else:
            # otherwise fall back to a headless cvlc subprocess
            command = ['cvlc',
                       '--no-video',       # disables video output
                       '--play-and-exit',  # close cvlc after play
                       '--quiet',          # deactivates all console messages
                       videos[0]]
            self.p = subprocess.Popen(command, stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
            (out, err) = self.p.communicate()

    def search(self, text):
        """Scrape a YouTube results page and return the video ids found.

        NOTE(review): HTML scraping of youtube.com is fragile and breaks
        whenever the page markup changes — an API would be more robust.
        """
        query = quote(text)
        url = "https://www.youtube.com/results?search_query=" + query
        response = urlopen(url)
        html = response.read()
        # Pin the parser so results don't depend on installed libraries
        soup = BeautifulSoup(html, "html.parser")
        vid = soup.findAll(attrs={'class': 'yt-uix-tile-link'})
        if not vid:
            return []
        return [video['href'].replace("/watch?v=", "") for video in vid]

    def stop(self):
        self.enclosure.activate_mouth_events()
        self.enclosure.mouth_reset()
        if self.p:
            self.p.terminate()
class Emby(CommonPlaySkill):
    """Mycroft skill that plays media from an Emby media server."""

    def __init__(self):
        super().__init__()
        self._setup = False
        self.audio_service = None
        self.emby_croft = None
        # Stable per-device id derived from the Mycroft device uuid
        self.device_id = hashlib.md5(
            ('Emby' + DeviceApi().identity.uuid).encode()).hexdigest()

    def initialize(self):
        pass

    @intent_file_handler('emby.intent')
    def handle_emby(self, message):
        """Handle a play request: look up matching media on Emby and play it."""
        self.log.log(20, message.data)

        # first thing is connect to emby or bail
        if not self.connect_to_emby():
            self.speak_dialog('configuration_fail')
            return

        # determine intent
        intent, intent_type = EmbyCroft.determine_intent(message.data)

        tracks = []
        try:
            tracks = self.emby_croft.handle_intent(intent, intent_type)
        except Exception as e:
            self.log.log(20, e)
            self.speak_dialog('play_fail', {"media": intent})

        if not tracks or len(tracks) < 1:
            self.log.log(20, 'No songs Returned')
            self.speak_dialog('play_fail', {"media": intent})
            return

        # setup audio service and play
        self.audio_service = AudioService(self.bus)
        self.speak_playing(intent)
        self.audio_service.play(tracks, message.data['utterance'])

    def speak_playing(self, media):
        """Announce which media is about to play."""
        self.speak_dialog('emby', {'media': media})

    @intent_file_handler('diagnostic.intent')
    def handle_diagnostic(self, message):
        """Run connectivity and auth diagnostics against the configured server."""
        self.log.log(20, message.data)
        self.speak_dialog('diag_start')

        # connect to emby for diagnostics
        self.connect_to_emby(True)
        connection_success, info = self.emby_croft.diag_public_server_info()
        if not connection_success:
            self.speak_dialog('diag_public_info_fail',
                              {'host': self.settings['hostname']})
            self.speak_dialog('general_check_settings_logs')
            self.speak_dialog('diag_stop')
            return
        self.speak_dialog('diag_public_info_success', info)

        if not self.connect_to_emby():
            self.speak_dialog('diag_auth_fail')
            self.speak_dialog('diag_stop')
            return
        self.speak_dialog('diag_auth_success')
        self.speak_dialog('diagnostic')

    def stop(self):
        pass

    def CPS_start(self, phrase, data):
        """Starts playback.

        Called by the playback control skill to start playback if the
        skill is selected (has the best match level)
        """
        # setup audio service
        self.audio_service = AudioService(self.bus)
        self.audio_service.play(data[phrase])

    def CPS_match_query_phrase(self, phrase):
        """This method responds whether the skill can play the input phrase.

        The method is invoked by the PlayBackControlSkill.

        Returns: tuple (matched phrase(str),
                        match level(CPSMatchLevel),
                        optional data(dict)) or
                 None if no match was found.
        """
        # first thing is connect to emby or bail
        if not self.connect_to_emby():
            return None

        self.log.log(20, phrase)
        match_type, songs = self.emby_croft.parse_common_phrase(phrase)
        if not (match_type and songs):
            return None

        match_level = None
        if match_type is not None:
            self.log.log(20, 'Found match of type: ' + match_type)
            if match_type in ('song', 'album'):
                match_level = CPSMatchLevel.TITLE
            elif match_type == 'artist':
                match_level = CPSMatchLevel.ARTIST
        self.log.log(20, 'match level' + str(match_level))

        song_data = {phrase: songs}
        self.log.log(20, "First 3 item urls returned")
        for song in songs[:3]:
            self.log.log(20, song)
        return phrase, match_level, song_data

    def connect_to_emby(self, dagnostic=False):
        """Attempts to connect to the server based on the config.

        If `dagnostic` (sic — name kept for caller compatibility) is
        False an attempt to auth is also made.

        :return: True on success, False on failure
        """
        try:
            self.emby_croft = EmbyCroft(
                self.settings["hostname"] + ":" + str(self.settings["port"]),
                self.settings["username"], self.settings["password"],
                self.device_id, dagnostic)
            return True
        except Exception as e:
            self.log.log(
                20, "failed to connect to emby, error: {0}".format(str(e)))
            return False
class NewsSkill(MycroftSkill):
    """Plays the latest news broadcast from a configurable RSS feed.

    Fix: the https→http downgrade previously used
    `re.sub('https', 'http', ...)`, which rewrites EVERY occurrence of
    the substring "https" anywhere in the URL (corrupting paths or query
    strings that contain it). The substitution is now anchored to the
    scheme only.
    """

    def __init__(self):
        super(NewsSkill, self).__init__(name="NewsSkill")
        self.process = None       # play_mp3 subprocess handle (fallback path)
        self.audioservice = None  # AudioService instance, if available

    def initialize(self):
        self.pre_select = self.settings.get("pre_select")
        self.url_rss = self.settings.get("url_rss")
        if "not_set" in self.pre_select:
            # Use a custom RSS URL
            self.url_rss = self.settings.get("url_rss")
        else:
            # Use the selected preset's URL
            self.url_rss = self.pre_select
        if not self.url_rss and 'url_rss' in self.config:
            self.url_rss = self.config['url_rss']
        if AudioService:
            self.audioservice = AudioService(self.emitter)

    @intent_handler(IntentBuilder("").require("Play").require("News"))
    def handle_intent(self, message):
        """Fetch the feed, pick the first audio link and play it."""
        try:
            data = feedparser.parse(self.url_rss)
            # Stop anything already playing
            self.stop()
            self.speak_dialog('news')
            wait_while_speaking()
            # After the intro, find and start the news stream: select the
            # first link in the entry that is an audio file, falling back
            # to the entry's first link of any type
            links = data['entries'][0]['links']
            i = next((n for n, link in enumerate(links)
                      if 'audio' in link['type']), 0)
            # Downgrade only the scheme to http; previously every 'https'
            # substring in the URL was rewritten
            url = re.sub(r'^https', 'http', links[i]['href'])
            # if audio service module is available use it
            if self.audioservice:
                self.audioservice.play(url, message.data['utterance'])
            else:
                # otherwise use normal mp3 playback
                self.process = play_mp3(url)
        except Exception as e:
            LOG.error("Error: {0}".format(e))

    def stop(self):
        """Halt audio-service playback or terminate the mp3 subprocess."""
        if self.audioservice:
            self.audioservice.stop()
        elif self.process and self.process.poll() is None:
            self.process.terminate()
            self.process.wait()
class GaleHomeAssistant(MycroftSkill):
    """Controls Home Assistant entities through a user-configured device map.

    The device map is JSON from skill settings with top-level keys
    'scene', 'script', 'switch' and 'light', each mapping spoken names
    to Home Assistant entity ids.
    """

    def __init__(self):
        MycroftSkill.__init__(self)
        self.ha = None  # HomeAssistantClient, rebuilt on settings change

    def initialize(self):
        self.audio_service = AudioService(self.bus)
        self.deviceMap = {}
        self.settings_change_callback = self.on_settings_changed
        self.on_settings_changed()

    def on_settings_changed(self):
        """Rebuild the HA client and spoken vocabulary from current settings."""
        # Create new HA Client with host/token settings
        self.ha = HomeAssistantClient(self.settings.get('host', ''),
                                      self.settings.get('token', ''))
        # Load and process device map
        try:
            self.deviceMap = json.loads(
                self.settings.get('device_map', '{}'))
            for scene_name in self.deviceMap.get('scene', {}):
                self.register_vocabulary(scene_name, 'HAScene')
            for script_name in self.deviceMap.get('script', {}):
                self.register_vocabulary(script_name, 'HAScript')
        except json.JSONDecodeError:
            self.log.error("Invalid JSON in device map: %s"
                           % self.settings.get('device_map'))

    @intent_handler(IntentBuilder('RunScript').require('HAScript'))
    def handle_run_script(self, message):
        """Run the HA script whose spoken name matches the utterance."""
        entity = message.data["utterance"]
        entity_id = self.deviceMap.get('script', {}).get(entity, '')
        if not entity_id:
            self.speak_dialog('NotFound', {'entity': entity})
            return
        self.ha.runScript(entity_id)
        if entity == "good morning":
            self.speak_dialog('GoodMorning')
            # Chain into the weather skill as part of the morning routine
            self.bus.emit(
                Message("recognizer_loop:utterance",
                        {'utterances': ["what's the weather"],
                         'lang': 'en-us'}))
        else:
            self.speak_dialog('RunningScript', {'entity': entity})

    @intent_handler(IntentBuilder('RunScene').require('HAScene'))
    def handle_run_scene(self, message):
        """Activate the HA scene whose spoken name matches the utterance."""
        entity = message.data["utterance"]
        entity_id = self.deviceMap.get('scene', {}).get(entity, '')
        if not entity_id:
            self.speak_dialog('NotFound', {'entity': entity})
            return
        self.ha.runScene(entity_id)
        if entity == "good night":
            self.speak_dialog('GoodNight')
        elif entity == "bedtime":
            # Play a quiet chime instead of speaking at bedtime
            self.audio_service.play(
                'file:///home/pi/mycroft-core/mycroft/res/snd/acknowledge.mp3'
            )
        else:
            self.speak_dialog('TurnedOn', {'entity': entity})

    @intent_handler(IntentBuilder('TurnOn').require('entityOn'))
    def handle_turn_on(self, message):
        """Turn on a switch or light by name."""
        entity = message.data.get('entityOn')
        # Check switch names first, then lights
        entity_id = (self.deviceMap.get('switch', {}).get(entity, '')
                     or self.deviceMap.get('light', {}).get(entity, ''))
        if not entity_id:
            self.speak_dialog('NotFound', {'entity': entity})
        else:
            self.ha.turnOn(entity_id)
            self.speak_dialog('TurnedOn', {'entity': entity})

    @intent_handler(IntentBuilder('TurnOff').require('entityOff'))
    def handle_turn_off(self, message):
        """Turn off a switch or light by name."""
        entity = message.data.get('entityOff')
        # Check switch names first, then lights
        entity_id = (self.deviceMap.get('switch', {}).get(entity, '')
                     or self.deviceMap.get('light', {}).get(entity, ''))
        if not entity_id:
            self.speak_dialog('NotFound', {'entity': entity})
        else:
            self.ha.turnOff(entity_id)
            self.speak_dialog('TurnedOff', {'entity': entity})

    @intent_handler(
        IntentBuilder('SetLightLevel').require('SetVerb').require(
            'entity').require('level').optionally('PercentVerb'))
    def handle_set_level(self, message):
        """Set a light's brightness; a switch only toggles on/off."""
        entity = message.data.get('entity')
        level = int(message.data.get('level'))
        light_id = self.deviceMap.get('light', {}).get(entity, '')
        switch_id = self.deviceMap.get('switch', {}).get(entity, '')
        if light_id:
            self.ha.setLevel(light_id, level)
        if switch_id:
            # A switch has no dimmer: any positive level means "on"
            if level > 0:
                self.ha.turnOn(switch_id)
            else:
                self.ha.turnOff(switch_id)
        if light_id:
            self.speak_dialog('LightLevel',
                              {'entity': entity, 'level': level})
        elif switch_id:
            self.speak_dialog('WrongType', {'entity': entity})
        else:
            self.speak_dialog('NotFound', {'entity': entity})

    def stop(self):
        pass