def play(self, results, language):
    """Queue the matched media items and start playback.

    Builds an MPTitleCollection from ``results`` (defaulting any missing
    song metadata to ""), submits it to the player via MPSetQueue, then
    completes the request with the localized "play" utterance and
    callbacks that set the state to Playing with shuffle disabled.
    """
    # Metadata copied from each result onto the outgoing MPSong; missing
    # attributes are materialized as "" on the result first (this also
    # mutates the result objects, as the original repetitive guards did).
    song_attrs = ("genre", "trackNumber", "artist", "title", "sortTitle",
                  "playCount", "rating", "album", "identifier")
    collection = MPTitleCollection()
    collection.items = []
    result = None
    for result in results:
        song = MPSong()
        for attr in song_attrs:
            if not hasattr(result, attr):
                setattr(result, attr, "")
            setattr(song, attr, getattr(result, attr))
        collection.items.append(song)
    if result is not None:
        # Collection metadata comes from the *last* result, as before.
        # NOTE(review): title/sortTitle look swapped here; preserved as-is
        # to keep wire behavior identical -- confirm against the client.
        collection.sortTitle = result.title
        collection.title = result.sortTitle
        collection.identifier = result.identifier
    complete = MPSetQueue(self.refId)
    complete.mediaItems = collection
    self.getResponseForRequest(complete)
    # Playback commands delivered as result callbacks, not sent directly.
    commands = MPSetState(self.refId)
    commands.state = "Playing"
    commands2 = MPEnableShuffle(self.refId)
    commands2.enable = False
    code = 0
    root = UIAddViews(self.refId)
    root.dialogPhase = "Summary"
    assistant = UIAssistantUtteranceView()
    assistant.dialogIdentifier = "PlayMedia#nowPlayingMediaItemByTitle"
    assistant.speakableText = assistant.text = res["play"][language]
    root.views = [assistant]
    root.callbacks = [ResultCallback([commands, commands2], code)]
    callback = [ResultCallback([root], code)]
    self.send_object(RequestCompleted(self.refId, callback))
    self.complete_request()
def play(self, results, language):
    """Send the matched songs to the player queue and begin playing them."""
    ATTRS = ("genre", "trackNumber", "artist", "title", "sortTitle",
             "playCount", "rating", "album", "identifier")
    collection = MPTitleCollection()
    collection.items = []
    for result in results:
        # Guarantee every expected attribute exists on the result, then
        # mirror it onto a fresh MPSong for the queue.
        for name in ATTRS:
            if not hasattr(result, name):
                setattr(result, name, "")
        song = MPSong()
        for name in ATTRS:
            setattr(song, name, getattr(result, name))
        collection.items.append(song)
    # Collection-level metadata is taken from the final result seen.
    collection.sortTitle = result.title
    collection.title = result.sortTitle
    collection.identifier = result.identifier
    queue_cmd = MPSetQueue(self.refId)
    queue_cmd.mediaItems = collection
    self.getResponseForRequest(queue_cmd)
    set_playing = MPSetState(self.refId)
    set_playing.state = "Playing"
    no_shuffle = MPEnableShuffle(self.refId)
    no_shuffle.enable = False
    summary = UIAddViews(self.refId)
    summary.dialogPhase = "Summary"
    utterance = UIAssistantUtteranceView()
    utterance.dialogIdentifier = "PlayMedia#nowPlayingMediaItemByTitle"
    utterance.speakableText = utterance.text = res["play"][language]
    summary.views = [utterance]
    summary.callbacks = [ResultCallback([set_playing, no_shuffle], 0)]
    done = RequestCompleted(self.refId, [ResultCallback([summary], 0)])
    self.send_object(done)
    self.complete_request()
def process_recognized_speech(self, googleJson, requestId, dictation):
    """Handle Google's speech-recognition JSON for one request.

    Picks the top hypothesis, echoes a SpeechRecognized object back to
    the client, then dispatches: dictation requests just complete;
    otherwise the utterance is offered to a plugin for immediate
    execution, handed to a plugin waiting for a response, or answered
    with a "not recognized" view plus a web-search button.
    """
    possible_matches = googleJson['hypotheses']
    if len(possible_matches) > 0:
        best_match = possible_matches[0]['utterance']
        # Capitalize the first character. Slicing with [:1] is safe for
        # an empty utterance (the previous [0] indexing raised
        # IndexError) and yields the same result for 1+ characters.
        best_match = best_match[:1].upper() + best_match[1:]
        best_match_confidence = possible_matches[0]['confidence']
        self.logger.info(u"Best matching result: \"{0}\" with a confidence of {1}%".format(best_match, round(float(best_match_confidence) * 100, 2)))
        # construct a SpeechRecognized
        token = Token(best_match, 0, 0, 1000.0, True, True)
        interpretation = Interpretation([token])
        phrase = Phrase(lowConfidence=False, interpretations=[interpretation])
        recognition = Recognition([phrase])
        recognized = SpeechRecognized(requestId, recognition)
        if not dictation:
            if self.current_running_plugin is None:
                # No plugin is active: ask the manager whether any plugin
                # claims this utterance for immediate execution.
                plugin = PluginManager.getPluginForImmediateExecution(self.assistant.assistantId, best_match, self.assistant.language, (self.send_object, self.send_plist, self.assistant, self.current_location))
                if plugin is not None:
                    plugin.refId = requestId
                    plugin.connection = self
                    self.current_running_plugin = plugin
                    self.send_object(recognized)
                    self.current_running_plugin.start()
                else:
                    # Nothing matched: show an apology view and offer a
                    # web search for the utterance, localized when possible.
                    self.send_object(recognized)
                    view = UIAddViews(requestId)
                    errorText = SiriProtocolHandler.__not_recognized[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__not_recognized else SiriProtocolHandler.__not_recognized["en-US"]
                    errorView = UIAssistantUtteranceView()
                    errorView.text = errorText.format(best_match)
                    errorView.speakableText = errorText.format(best_match)
                    view.views = [errorView]
                    websearchText = SiriProtocolHandler.__websearch[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__websearch else SiriProtocolHandler.__websearch["en-US"]
                    button = UIButton()
                    button.text = websearchText
                    cmd = SendCommands()
                    cmd.commands = [StartRequest(utterance=u"^webSearchQuery^=^{0}^^webSearchConfirmation^=^Yes^".format(best_match))]
                    button.commands = [cmd]
                    view.views.append(button)
                    self.send_object(view)
                    self.send_object(RequestCompleted(requestId))
            elif self.current_running_plugin.waitForResponse is not None:
                # NOTE(review): a SpeechRecognized object is deliberately
                # not echoed here (original comment questioned this);
                # confirm whether the client requires it before answering
                # a waiting plugin.
                self.current_running_plugin.response = best_match
                self.current_running_plugin.refId = requestId
                self.current_running_plugin.waitForResponse.set()
            else:
                self.send_object(recognized)
                self.send_object(RequestCompleted(requestId))
        else:
            # Dictation: just hand back the recognition result.
            self.send_object(recognized)
            self.send_object(RequestCompleted(requestId))
def resume(self, language):
    """Resume playback and confirm with the localized "resume" phrase."""
    play_cmd = MPSetState(self.refId)
    play_cmd.state = "Playing"
    summary = UIAddViews(self.refId)
    summary.dialogPhase = "Summary"
    utterance = UIAssistantUtteranceView()
    # NOTE(review): identifier says SkipToNext although this resumes
    # playback -- looks copy-pasted; verify against the client protocol.
    utterance.dialogIdentifier = "PlayMedia#SkipToNext"
    utterance.speakableText = utterance.text = res["resume"][language]
    summary.views = [utterance]
    summary.callbacks = [ResultCallback([play_cmd], 0)]
    done = RequestCompleted(self.refId, [ResultCallback([summary], 0)])
    self.send_object(done)
    self.complete_request()
def pause(self, language):
    """Pause playback and confirm with the localized "pause" phrase."""
    pause_cmd = MPSetState(self.refId)
    pause_cmd.state = "Paused"
    summary = UIAddViews(self.refId)
    summary.dialogPhase = "Summary"
    utterance = UIAssistantUtteranceView()
    utterance.dialogIdentifier = "PlayMedia#Paused"
    utterance.speakableText = utterance.text = res["pause"][language]
    summary.views = [utterance]
    summary.callbacks = [ResultCallback([pause_cmd], 0)]
    done = RequestCompleted(self.refId, [ResultCallback([summary], 0)])
    self.send_object(done)
    self.complete_request()
def process_recognized_speech(self, googleJson, requestId, dictation):
    """Handle Google's speech-recognition JSON for one request.

    Takes the top hypothesis verbatim, echoes a SpeechRecognized object
    back to the client, then dispatches: dictation requests just
    complete; otherwise the utterance is offered to a plugin for
    immediate execution, forwarded to a plugin waiting on a response, or
    answered with a "not recognized" view plus a web-search button.
    """
    possible_matches = googleJson['hypotheses']
    if len(possible_matches) > 0:
        best_match = possible_matches[0]['utterance']
        best_match_confidence = possible_matches[0]['confidence']
        self.logger.info(u"Best matching result: \"{0}\" with a confidence of {1}%".format(best_match, round(float(best_match_confidence) * 100, 2)))
        # construct a SpeechRecognized
        token = Token(best_match, 0, 0, 1000.0, True, True)
        interpretation = Interpretation([token])
        phrase = Phrase(lowConfidence=False, interpretations=[interpretation])
        recognition = Recognition([phrase])
        recognized = SpeechRecognized(requestId, recognition)
        if not dictation:
            # PEP 8: compare to None with "is"/"is not", not ==/!=.
            if self.current_running_plugin is None:
                # No plugin is active: ask the manager whether any plugin
                # claims this utterance for immediate execution.
                plugin = PluginManager.getPluginForImmediateExecution(self.assistant.assistantId, best_match, self.assistant.language, (self.send_object, self.send_plist, self.assistant, self.current_location))
                if plugin is not None:
                    plugin.refId = requestId
                    plugin.connection = self
                    self.current_running_plugin = plugin
                    self.send_object(recognized)
                    self.current_running_plugin.start()
                else:
                    # Nothing matched: show an apology view and offer a
                    # web search for the utterance, localized when possible.
                    self.send_object(recognized)
                    view = UIAddViews(requestId)
                    errorText = SiriProtocolHandler.__not_recognized[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__not_recognized else SiriProtocolHandler.__not_recognized["en-US"]
                    errorView = UIAssistantUtteranceView()
                    errorView.text = errorText.format(best_match)
                    errorView.speakableText = errorText.format(best_match)
                    view.views = [errorView]
                    websearchText = SiriProtocolHandler.__websearch[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__websearch else SiriProtocolHandler.__websearch["en-US"]
                    button = UIButton()
                    button.text = websearchText
                    cmd = SendCommands()
                    cmd.commands = [StartRequest(utterance=u"^webSearchQuery^=^{0}^^webSearchConfirmation^=^Yes^".format(best_match))]
                    button.commands = [cmd]
                    view.views.append(button)
                    self.send_object(view)
                    self.send_object(RequestCompleted(requestId))
            elif self.current_running_plugin.waitForResponse is not None:
                # A plugin is blocked waiting for the user's answer:
                # echo the recognition and wake the plugin thread.
                self.send_object(recognized)
                self.current_running_plugin.response = best_match
                self.current_running_plugin.refId = requestId
                self.current_running_plugin.waitForResponse.set()
            else:
                self.send_object(recognized)
                self.send_object(RequestCompleted(requestId))
        else:
            # Dictation: just hand back the recognition result.
            self.send_object(recognized)
            self.send_object(RequestCompleted(requestId))
def back(self, language):
    """Jump to the previous item and confirm with the localized phrase."""
    play_cmd = MPSetState(self.refId)
    play_cmd.state = "Playing"
    prev_cmd = MPSetPlaybackPosition(self.refId)
    prev_cmd.position = "PreviousItem"
    summary = UIAddViews(self.refId)
    summary.dialogPhase = "Summary"
    utterance = UIAssistantUtteranceView()
    utterance.dialogIdentifier = "PlayMedia#Previous"
    utterance.speakableText = utterance.text = res["back"][language]
    summary.views = [utterance]
    summary.callbacks = [ResultCallback([play_cmd, prev_cmd], 0)]
    done = RequestCompleted(self.refId, [ResultCallback([summary], 0)])
    self.send_object(done)
    self.complete_request()
def beginning(self, language):
    """Restart the current item from the top and confirm it verbally."""
    play_cmd = MPSetState(self.refId)
    play_cmd.state = "Playing"
    seek_cmd = MPSetPlaybackPosition(self.refId)
    seek_cmd.position = "Beginning"
    summary = UIAddViews(self.refId)
    summary.dialogPhase = "Summary"
    utterance = UIAssistantUtteranceView()
    utterance.dialogIdentifier = "PlayMedia#SkipToBeginning"
    utterance.speakableText = utterance.text = res["beginning"][language]
    summary.views = [utterance]
    summary.callbacks = [ResultCallback([play_cmd, seek_cmd], 0)]
    done = RequestCompleted(self.refId, [ResultCallback([summary], 0)])
    self.send_object(done)
    self.complete_request()