class PlaybackControlSkill(MycroftSkill):
    """Relay generic playback commands to the Mycroft audio service.

    Registers intents for next/prev/pause/resume and forwards each to
    the AudioService; 'Audio' skills react to the resulting messagebus
    events.
    """

    def __init__(self):
        super(PlaybackControlSkill, self).__init__('Playback Control Skill')

    def initialize(self):
        """Create the AudioService wrapper used by every handler."""
        self.log.info('initializing Playback Control Skill')
        self.audio_service = AudioService(self.emitter)
        # Handle common audio intents. 'Audio' skills should listen for the
        # common messages:
        #   self.add_event('mycroft.audio.service.next', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.prev', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.pause', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.resume', SKILL_HANDLER)

    @intent_handler(IntentBuilder('').require('Next').require("Track"))
    def handle_next(self, message):
        """Skip to the next track."""
        self.audio_service.next()

    @intent_handler(IntentBuilder('').require('Prev').require("Track"))
    def handle_prev(self, message):
        """Go back to the previous track."""
        self.audio_service.prev()

    @intent_handler(IntentBuilder('').require('Pause'))
    def handle_pause(self, message):
        """Pause the current playback."""
        self.audio_service.pause()

    @intent_handler(IntentBuilder('').one_of('PlayResume', 'Resume'))
    def handle_play(self, message):
        """Resume playback if paused"""
        self.audio_service.resume()

    def stop(self, message=None):
        """Broadcast a stop request to all audio backends."""
        self.log.info("Stopping audio")
        self.emitter.emit(Message('mycroft.audio.service.stop'))
class PlaybackControlSkill(MediaSkill):
    """MediaSkill-based playback control: forwards transport commands
    (next/prev/pause/resume/stop) to the Mycroft audio service."""

    def __init__(self):
        super(PlaybackControlSkill, self).__init__('Playback Control Skill')
        logger.info('Playback Control Inited')

    def initialize(self):
        """Run MediaSkill setup, load resources and attach the audio service."""
        logger.info('initializing Playback Control Skill')
        super(PlaybackControlSkill, self).initialize()
        self.load_data_files(dirname(__file__))
        self.audio_service = AudioService(self.emitter)

    def handle_next(self, message):
        """Skip to the next track."""
        self.audio_service.next()

    def handle_prev(self, message):
        """Go back to the previous track."""
        self.audio_service.prev()

    def handle_pause(self, message):
        """Pause the current playback."""
        self.audio_service.pause()

    def handle_play(self, message):
        """Resume playback if paused"""
        self.audio_service.resume()

    def handle_currently_playing(self, message):
        # Intentionally a no-op: this skill has no track metadata to report.
        return

    def stop(self, message=None):
        """Broadcast a stop request to all audio backends."""
        logger.info("Stopping audio")
        self.emitter.emit(Message('mycroft.audio.service.stop'))
class TestAudioServiceControls(TestCase): def assertLastMessageTypeEqual(self, bus, msg_type): message = bus.emit.call_args_list[-1][0][0] self.assertEqual(message.msg_type, msg_type) def setUp(self): self.bus = mock.Mock(name='bus') self.audioservice = AudioService(self.bus) def test_pause(self): self.audioservice.pause() self.assertLastMessageTypeEqual(self.bus, 'mycroft.audio.service.pause') def test_resume(self): self.audioservice.resume() self.assertLastMessageTypeEqual(self.bus, 'mycroft.audio.service.resume') def test_next(self): self.audioservice.next() self.assertLastMessageTypeEqual(self.bus, 'mycroft.audio.service.next') def test_prev(self): self.audioservice.prev() self.assertLastMessageTypeEqual(self.bus, 'mycroft.audio.service.prev') def test_stop(self): self.audioservice.stop() self.assertLastMessageTypeEqual(self.bus, 'mycroft.audio.service.stop') def test_seek(self): self.audioservice.seek() message = self.bus.emit.call_args_list[-1][0][0] self.assertEqual(message.msg_type, 'mycroft.audio.service.seek_forward') self.assertEqual(message.data['seconds'], 1) self.audioservice.seek(5) message = self.bus.emit.call_args_list[-1][0][0] self.assertEqual(message.msg_type, 'mycroft.audio.service.seek_forward') self.assertEqual(message.data['seconds'], 5) self.audioservice.seek(-5) message = self.bus.emit.call_args_list[-1][0][0] self.assertEqual(message.msg_type, 'mycroft.audio.service.seek_backward') self.assertEqual(message.data['seconds'], 5)
class PlaybackControlSkill(MycroftSkill):
    """Relay generic playback commands to the Mycroft audio service.

    Registers the common transport intents (next/prev/pause/resume);
    intents that *start* playback belong in the specific audio skills.
    """

    def __init__(self):
        super(PlaybackControlSkill, self).__init__('Playback Control Skill')
        logger.info('Playback Control Inited')

    def initialize(self):
        """Attach the audio service and register the common intents."""
        logger.info('initializing Playback Control Skill')
        self.audio_service = AudioService(self.emitter)
        # Register common intents, these include basically all intents
        # except the intents to start playback (which should be implemented
        # by specific audio skills).
        registrations = (
            (IntentBuilder('NextIntent').require('NextKeyword'),
             self.handle_next),
            (IntentBuilder('PrevIntent').require('PrevKeyword'),
             self.handle_prev),
            (IntentBuilder('PauseIntent').require('PauseKeyword'),
             self.handle_pause),
            (IntentBuilder('PlayIntent').one_of('PlayResumeKeyword',
                                                'ResumeKeyword'),
             self.handle_play),
        )
        for intent, handler in registrations:
            self.register_intent(intent, handler)

    def handle_next(self, message):
        """Skip to the next track."""
        self.audio_service.next()

    def handle_prev(self, message):
        """Go back to the previous track."""
        self.audio_service.prev()

    def handle_pause(self, message):
        """Pause the current playback."""
        self.audio_service.pause()

    def handle_play(self, message):
        """Resume playback if paused"""
        self.audio_service.resume()

    def stop(self, message=None):
        """Broadcast a stop request to all audio backends."""
        logger.info("Stopping audio")
        self.emitter.emit(Message('mycroft.audio.service.stop'))
class PlaybackControlSkill(MycroftSkill):
    """Common playback control plus the CommonPlay 'play X' broker.

    Relays next/prev/pause/resume to the audio service, and resolves
    'play <phrase>' requests by broadcasting a query on the messagebus,
    collecting confidence-scored replies from skills, then starting
    playback with the best match.
    """

    def __init__(self):
        super(PlaybackControlSkill, self).__init__('Playback Control Skill')
        self.query_replies = {}      # phrase -> replies collected so far
        self.query_extensions = {}   # phrase -> skill_ids still searching
        self.has_played = False
        self.lock = Lock()

    # TODO: Make this an option for voc_match()? Only difference is the
    # comparison using "==" instead of "in"
    def voc_match_exact(self, utt, voc_filename, lang=None):
        """Check whether an utterance exactly equals a vocabulary entry.

        Compares whole .voc lines with "==" (not substring containment),
        so "yes, please" will NOT match a "yes" entry.  Looks first in
        this skill's .voc files, then in mycroft-core's res/text folder;
        file contents are cached to avoid re-reading from disk.

        Args:
            utt (str): utterance to be tested
            voc_filename (str): vocabulary file name without extension
                (e.g. 'yes' for 'res/text/en-us/yes.voc')
            lang (str): language code, defaults to self.lang

        Returns:
            bool: True if utt exactly matches a vocabulary line

        Raises:
            FileNotFoundError: if no matching .voc file can be located
        """
        lang = lang or self.lang
        cache_key = lang + voc_filename
        if cache_key not in self.voc_match_cache:
            # Check for both skill resources and mycroft-core resources
            voc = self.find_resource(voc_filename + '.voc', 'vocab')
            if not voc:
                voc = self.resolve_resource_file(
                    join('text', lang, voc_filename + '.voc'))
            if not voc or not exists(voc):
                raise FileNotFoundError(
                    'Could not find {}.voc file'.format(voc_filename))
            with open(voc) as f:
                self.voc_match_cache[cache_key] = f.read().splitlines()
        if not utt:
            return False
        return any(line.strip() == utt
                   for line in self.voc_match_cache[cache_key])

    def initialize(self):
        """Attach the audio service, query protocol events and GUI handlers."""
        self.audio_service = AudioService(self.bus)
        self.add_event('play:query.response',
                       self.handle_play_query_response)
        self.add_event('play:status', self.handle_song_info)
        self.gui.register_handler('next', self.handle_next)
        self.gui.register_handler('prev', self.handle_prev)
        self.clear_gui_info()
        # Handle common audio intents. 'Audio' skills should listen for the
        # common messages:
        #   self.add_event('mycroft.audio.service.next', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.prev', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.pause', SKILL_HANDLER)
        #   self.add_event('mycroft.audio.service.resume', SKILL_HANDLER)

    def clear_gui_info(self):
        """Clear the gui variable list."""
        for key in STATUS_KEYS:
            self.gui[key] = ''

    @intent_handler(IntentBuilder('').require('Next').require("Track"))
    def handle_next(self, message):
        """Skip to the next track."""
        self.audio_service.next()

    @intent_handler(IntentBuilder('').require('Prev').require("Track"))
    def handle_prev(self, message):
        """Go back to the previous track."""
        self.audio_service.prev()

    @intent_handler(IntentBuilder('').require('Pause'))
    def handle_pause(self, message):
        """Pause the current playback."""
        self.audio_service.pause()

    @intent_handler(IntentBuilder('').one_of('PlayResume', 'Resume'))
    def handle_play(self, message):
        """Resume playback if paused"""
        self.audio_service.resume()

    def stop(self, message=None):
        """Stop playback if anything is playing; return whether we did."""
        self.clear_gui_info()
        self.log.info('Audio service status: '
                      '{}'.format(self.audio_service.track_info()))
        if self.audio_service.is_playing:
            self.audio_service.stop()
            return True
        return False

    def converse(self, utterances, lang="en-us"):
        """Catch a bare 'resume'-style utterance after something has played."""
        # NOTE: voc_match() will overmatch (e.g. it'll catch "play next
        # song" or "play Some Artist"), hence the exact comparison.
        if (utterances and self.has_played
                and self.voc_match_exact(utterances[0], "converse_resume")):
            self.audio_service.resume()
            return True
        return False

    @intent_handler(IntentBuilder('').require('Play').require('Phrase'))
    def play(self, message):
        """Broadcast a play:query for the requested phrase.

        Emits {"type": "play:query", "phrase": <target>} on the bus.
        Skills answer with play:query.response messages carrying their
        skill_id, a confidence value and optional callback_data they can
        reuse if chosen, e.g. conf 0.7 means 70% confident they can
        handle the request.  Replies are gathered until a scheduled
        timeout fires (_play_query_timeout).
        """
        self.speak_dialog("just.one.moment")
        # Strip everything up to and including the matched 'Play' synonym.
        # This relies on a Play.voc listing the synonyms plus a .rx file
        # with one "synonym (?P<Phrase>.*)" line per synonym, e.g.:
        #   Play.voc:   play / bork
        #   phrase.rx:  play (?P<Phrase>.*)
        #               bork (?P<Phrase>.*)
        # This really just hacks around limitations of the Adapt regex
        # system, which will only return the first word of the target phrase.
        utt = message.data.get('utterance')
        phrase = re.sub('^.*?' + message.data['Play'], '', utt).strip()
        self.log.info("Resolving Player for: " + phrase)
        wait_while_speaking()
        self.enclosure.mouth_think()
        self.query_replies[phrase] = []
        self.query_extensions[phrase] = []
        self.bus.emit(message.forward('play:query',
                                      data={"phrase": phrase}))
        self.schedule_event(self._play_query_timeout, 1,
                            data={"phrase": phrase},
                            name='PlayQueryTimeout')

    def handle_play_query_response(self, message):
        """Collect replies and manage per-skill search-time extensions."""
        with self.lock:
            phrase = message.data["phrase"]
            if ("searching" in message.data
                    and phrase in self.query_extensions):
                # A skill is reporting whether it is still searching.
                skill_id = message.data["skill_id"]
                if message.data["searching"]:
                    # Grant the skill 5 more seconds before resolving.
                    self.cancel_scheduled_event("PlayQueryTimeout")
                    self.schedule_event(self._play_query_timeout, 5,
                                        data={"phrase": phrase},
                                        name='PlayQueryTimeout')
                    # TODO: Perhaps block multiple extensions?
                    if skill_id not in self.query_extensions[phrase]:
                        self.query_extensions[phrase].append(skill_id)
                else:
                    # Search complete, don't wait on this skill any longer
                    if skill_id in self.query_extensions[phrase]:
                        self.query_extensions[phrase].remove(skill_id)
                        if not self.query_extensions[phrase]:
                            # Nobody left searching: resolve immediately.
                            self.cancel_scheduled_event("PlayQueryTimeout")
                            self.schedule_event(self._play_query_timeout, 0,
                                                data={"phrase": phrase},
                                                name='PlayQueryTimeout')
            elif phrase in self.query_replies:
                # An actual answer; keep collecting until the timeout.
                self.query_replies[message.data["phrase"]].append(
                    message.data)

    def _play_query_timeout(self, message):
        """Pick the highest-confidence reply and start playback with it."""
        with self.lock:
            # Prevent any late-comers from retriggering this query handler
            phrase = message.data["phrase"]
            self.query_extensions[phrase] = []
            self.enclosure.mouth_reset()

            # Find response(s) with the highest confidence
            best = None
            ties = []
            self.log.debug("CommonPlay Resolution: {}".format(phrase))
            for reply in self.query_replies[phrase]:
                self.log.debug(" {} using {}".format(
                    reply["conf"], reply["skill_id"]))
                if not best or reply["conf"] > best["conf"]:
                    best = reply
                    ties = []
                elif reply["conf"] == best["conf"]:
                    ties.append(reply)

            if best:
                if ties:
                    # TODO: Ask user to pick between ties or do it
                    # automagically
                    pass
                # invoke best match
                self.gui.show_page("controls.qml", override_idle=True)
                self.log.info("Playing with: {}".format(best["skill_id"]))
                start_data = {
                    "skill_id": best["skill_id"],
                    "phrase": phrase,
                    "callback_data": best.get("callback_data")
                }
                self.bus.emit(message.forward('play:start', start_data))
                self.has_played = True
            elif self.voc_match(phrase, "Music"):
                # Sounded like a music request but nobody could serve it;
                # hint at configuring a music backend.
                self.speak_dialog("setup.hints")
            else:
                self.log.info(" No matches")
                self.speak_dialog("cant.play", data={"phrase": phrase})

            if phrase in self.query_replies:
                del self.query_replies[phrase]
            if phrase in self.query_extensions:
                del self.query_extensions[phrase]

    def handle_song_info(self, message):
        """Update GUI track info from a play:status message; log on change."""
        changed = False
        for key in STATUS_KEYS:
            val = message.data.get(key, '')
            changed = changed or self.gui[key] != val
            self.gui[key] = val
        if changed:
            self.log.info('\n-->Track: {}\n-->Artist: {}\n-->Image: {}'
                          ''.format(self.gui['track'], self.gui['artist'],
                                    self.gui['image']))