def play_episode(self, podcast, episode):
    """Start (or resume) playback of a podcast episode via VLC.

    Loads the saved playback position for the episode, announces either a
    resume or a fresh start, then plays the enclosure URL and seeks to the
    stored position. On any failure the now-playing state is reset.

    Args:
        podcast: podcast object the episode belongs to.
        episode: episode object with enclosure_url, guid and title.
    """
    # Defined up-front so the except block can always log it safely.
    playback_position = 0
    try:
        if self.player is not None and self.player.is_playing():
            self.stop_playback()
        self.current_podcast = podcast
        self.current_episode = episode
        url = episode.enclosure_url
        LOG.info(url)
        playback_position = self.load_episode_playback_state(
            podcast, episode)
        LOG.info("Starting at " + str(playback_position) + " for [" +
                 episode.guid + "] " + episode.title)
        if playback_position > 0:
            self.speak_dialog("resume", {"episode_title": episode.title})
        else:
            self.speak_dialog("playing", {"episode_title": episode.title})
        wait_while_speaking()
        self.player = vlc.MediaPlayer(episode.enclosure_url)
        self.add_event('recognizer_loop:record_begin',
                       self.handle_listener_started)
        self.player.play()
        self.player.set_time(playback_position)
    except Exception:
        # BUG FIX: the original concatenated a str with type(...), which
        # itself raises TypeError inside the handler, and referenced
        # playback_position before it was guaranteed to be bound.
        LOG.error("Playback position type is " + str(type(playback_position)))
        LOG.error(str(playback_position))
        self.reset_now_playing()
def find_station(self, search_term):
    """Fuzzy-match *search_term* against TuneIn results and start streaming."""
    search_term = self.remove_aliases(search_term)
    dom = request_api(search_term)
    # Each result is its own <outline> element per the OPML format
    # (https://en.wikipedia.org/wiki/OPML); fuzzy-match the query to them.
    match, perc = _fuzzy_match(search_term,
                               dom.getElementsByTagName("outline"))
    if match is None:
        # Nothing matched the query term.
        self.speak_dialog("not.found")
        wait_while_speaking()
        LOG.debug("Could not find a station with the query term: " +
                  search_term)
        return
    # Stop any stream that is already running.
    if self.audio_state == "playing":
        self.stop()
    self.mpeg_url = match.getAttribute("URL")
    self.station_name = match.getAttribute("text")
    # mpeg_url serves audio/x-mpegurl data: just a list of real stream URLs.
    self.stream_url = self.get_stream_url(self.mpeg_url)
    self.audio_state = "playing"
    self.speak_dialog("now.playing", {"station": self.station_name})
    wait_while_speaking()
    LOG.debug("Station: " + self.station_name)
    LOG.debug("Station name fuzzy match percent: " + str(perc))
    LOG.debug("Stream URL: " + self.stream_url)
    self.mediaplayer.add_list([self.stream_url])
    self.mediaplayer.play()
def given_no_alarms(context):
    """Behave step: guarantee a clean state by cancelling every alarm."""
    followups = ['ask.cancel.alarm.plural',
                 'ask.cancel.desc.alarm',
                 'ask.cancel.desc.alarm.recurring']
    no_alarms = ['alarms.list.empty']
    cancelled = ['alarm.cancelled.desc',
                 'alarm.cancelled.desc.recurring',
                 'alarm.cancelled.multi',
                 'alarm.cancelled.recurring']
    print('ASKING QUESTION')
    emit_utterance(context.bus, 'cancel all alarms')
    # Poll the bus for up to ~10 seconds for either a confirmation prompt
    # (answer "yes") or the empty-list response (nothing to cancel).
    for _ in range(10):
        for message in context.bus.get_messages('speak'):
            dialog = message.data.get('meta', {}).get('dialog')
            if dialog in followups:
                print('Answering yes!')
                time.sleep(1)
                wait_while_speaking()
                emit_utterance(context.bus, 'yes')
                wait_for_dialog(context.bus, cancelled)
                context.bus.clear_messages()
                return
            if dialog in no_alarms:
                context.bus.clear_messages()
                return
        time.sleep(1)
    context.bus.clear_messages()
def right(self):
    """Handle a correct answer: display it, say it, play a jingle, score."""
    self.enclosure.mouth_text("CORRECT!")
    self.speak_dialog("correct")
    wait_while_speaking()
    self.play('true.wav')
    self.score(1)
def handle_trivia_intent(self):
    """Fetch five multiple-choice questions from opentdb and run the game.

    Resets per-game settings, shows an icon on the faceplate, then asks
    each question via preparequestion() and finishes with endgame().
    """
    self.enclosure.deactivate_mouth_events()
    # Display icon on faceplate
    self.enclosure.mouth_display(
        "aIMAMAMPMPMPMAMAAPAPADAAIOIOAAAHAMAMAHAAIOIOAAAPAFAFAPAAMLMLAAAAAA",
        x=1, y=0, refresh=True)
    time.sleep(2)
    self.settings['cat'] = None
    self.settings['question'] = None
    self.settings['answers'] = None
    self.settings['myanswer'] = None
    self.settings['correct_answer'] = None
    self.settings['resdir'] = '/opt/mycroft/skills/skill-trivia/res/'
    url = "https://opentdb.com/api.php?amount=5&type=multiple"
    headers = {'Accept': 'text/plain'}
    # BUG FIX: requests.get(url, headers) passed the dict as the second
    # positional parameter, which is *params* — the headers were never
    # sent. Pass it via the headers= keyword.
    r = requests.get(url, headers=headers)
    m = json.loads(r.text)
    questions = m['results']
    global score
    score = 0
    self.play('intro.wav')
    self.speak("Okay, lets play a game of trivia. Get ready!")
    wait_while_speaking()
    for f in questions:
        self.enclosure.activate_mouth_events()
        self.enclosure.mouth_reset()
        self.preparequestion(f['category'], f['question'],
                             f['incorrect_answers'], f['correct_answer'])
    self.endgame(score)
def _do_net_check(self):
    """Check internet access; if offline, prompt setup or run first boot."""
    # TODO: This should live in the derived Enclosure, e.g. EnclosureMark1
    LOG.info("Checking internet connection")
    if connected():  # (original also noted: and self.conn_monitor is None)
        return
    if has_been_paired():
        # TODO: Enclosure/localization
        self.speak("This unit is not connected to the Internet. "
                   "Either plug in a network cable or hold the "
                   "button on top for two seconds, then select "
                   "wifi from the menu")
        return
    # First run with factory defaults: begin the out-of-box experience.
    # TODO: This logic should be in EnclosureMark1
    # TODO: Enclosure/localization
    # Don't listen to the mic during this onboarding; it is unmuted
    # again once pairing completes (see _handle_pairing_complete).
    self.bus.emit(Message("mycroft.mic.mute"))
    self.bus.once('mycroft.paired', self._handle_pairing_complete)
    self.speak(mycroft.dialog.get('mycroft.intro'))
    wait_while_speaking()
    time.sleep(2)  # a pause sounds better than just jumping in
    # Kick off wifi-setup automatically
    self.bus.emit(Message('system.wifi.setup',
                          {'allow_timeout': False, 'lang': self.lang}))
def tell_story(self, url, bookmark):
    """Read a story aloud from *bookmark*, tracking progress in settings."""
    self.is_reading = True
    title = self.get_title(url)
    subtitle = self.get_subtitle(url)
    self.speak_dialog('title_by_author',
                      data={'title': title, 'subtitle': subtitle})
    time.sleep(1)
    self.log.info(url)
    paragraphs = self.get_story(url).split('\n\n')
    for paragraph in paragraphs[bookmark:]:
        self.settings['bookmark'] += 1
        time.sleep(.5)
        # Stop requests flip is_reading; bail out between paragraphs...
        if not self.is_reading:
            break
        for sentence in paragraph.split('. '):
            # ...and between sentences.
            if not self.is_reading:
                break
            wait_while_speaking()
            self.speak(sentence, wait=True)
    if self.is_reading:
        # Finished naturally: clear the reading state and credit the source.
        self.is_reading = False
        self.settings['bookmark'] = 0
        self.settings['story'] = None
        time.sleep(2)
        self.speak_dialog('from_AndersensTales')
def _do_net_check(self):
    """Check internet access; if offline, prompt setup or run first boot."""
    # TODO: This should live in the derived Enclosure, e.g. EnclosureMark1
    LOG.info("Checking internet connection")
    if connected():  # (original also noted: and self.conn_monitor is None)
        return
    if has_been_paired():
        # TODO: Enclosure/localization
        self.speak("This unit is not connected to the Internet. "
                   "Either plug in a network cable or setup your "
                   "wifi connection.")
        return
    # First run with factory defaults: begin the out-of-box experience.
    # TODO: This logic should be in EnclosureMark1
    # TODO: Enclosure/localization
    # Don't listen to the mic during this onboarding; it is unmuted
    # again once pairing completes (see _handle_pairing_complete).
    self.bus.emit(Message("mycroft.mic.mute"))
    self.bus.once('mycroft.paired', self._handle_pairing_complete)
    self.speak(mycroft.dialog.get('mycroft.intro'))
    wait_while_speaking()
    time.sleep(2)  # a pause sounds better than just jumping in
    # Kick off wifi-setup automatically
    self.bus.emit(Message('system.wifi.setup',
                          {'allow_timeout': False, 'lang': self.lang}))
def handle_end_timer(self, message):
    """ callback for _schedule_alarm_event scheduled_event()

        Announces the expired alarm, then either reschedules it one week
        out (recurring alarms) or removes it (one-shot alarms).

        Args:
            message (Message): object passed by messagebus
    """
    # message.data carries the scheduled event name; alarm_name[:-1]
    # strips a trailing marker character to recover the stored alarm name.
    alarm_name = message.data
    self.cancel_timer(alarm_name)
    self.speak("{} alarm is up".format(alarm_name[:-1]))
    wait_while_speaking()
    self.notify()
    # handle recurring
    recurring = False
    for alarm_object in self.settings['alarms']:
        if alarm_object['name'] == alarm_name[:-1]:
            if alarm_object['recurring'] is True:
                nearest_arrow, index = \
                    self.get_nearest_date_from_now(
                        alarm_object['arrow_objects'])
                # Next occurrence is exactly one week after the nearest date.
                arrow_object = nearest_arrow.shift(weeks=+1)
                # NOTE(review): this local shadows the `time` module name —
                # harmless here but worth renaming.
                time = arrow_object.datetime
                self._schedule_alarm_event(alarm_name, time)
                # Replace the consumed occurrence with the rescheduled one.
                if index is not None:
                    alarm_object['arrow_objects'].pop(index)
                alarm_object['arrow_objects'].append(str(arrow_object))
                recurring = True
    if recurring is False:
        self.remove_alarm(alarm_name[:-1])
def on_message(self, mqttc, obj, msg):
    """MQTT callback: relay a remote command, or speak a remote message.

    Sample Payload {"source":"basement", "message":"is dinner ready yet"}
    """
    LOG.info('message received for location id: ' + str(self.location_id))
    LOG.info("This device location is: " + DeviceApi().get()["description"])
    try:
        payload = msg.payload.decode('utf-8')
        LOG.info(msg.topic + " " + str(msg.qos) + ", " + payload)
        remote = json.loads(payload)
        if "command" in remote:
            # example: {"source":"kitchen", "command":"what time is it"}
            LOG.info('Command Received! - ' + remote["command"] +
                     ', From: ' + remote["source"])
            # Remember where to route the eventual response.
            self.response_location = remote["source"]
            self.send_message(remote["command"])
        elif "message" in remote:
            # example: {"source":"kitchen", "message":"is dinner ready yet"}
            self.response_location = ''
            LOG.info('Message Received! - ' + remote["message"] +
                     ', From: ' + remote["source"])
            self.speak_dialog('location',
                              data={"result": remote["source"]},
                              expect_response=False)
            wait_while_speaking()
            self.speak_dialog('message',
                              data={"result": remote["message"]},
                              expect_response=False)
        else:
            LOG.info('Unable to decode the MQTT Message')
    except Exception as e:
        LOG.error('Error: {0}'.format(e))
def handle_intent(self, message):
    """Play the latest news broadcast from the configured RSS feed."""
    try:
        data = feedparser.parse(self.url_rss)
        # Stop anything already playing
        self.stop()
        self.speak_dialog('news')
        wait_while_speaking()
        # After the intro, find and start the news stream: use the first
        # link tagged as audio, falling back to the entry's first link.
        links = data['entries'][0]['links']
        chosen = 0
        for position, link in enumerate(links):
            if 'audio' in link['type']:
                chosen = position
                break
        url = re.sub('https', 'http', links[chosen]['href'])
        if self.audioservice:
            # if audio service module is available use it
            self.audioservice.play(url, message.data['utterance'])
        else:
            # othervice use normal mp3 playback
            self.process = play_mp3(url)
    except Exception as e:
        LOG.error("Error: {0}".format(e))
def search_youtube(self, search_term):
    """Search YouTube and play the first result that is a watchable video.

    Scrapes the search results page, skips tiles whose href is not a
    /watch?v=... video link, and plays the first playable stream found.
    """
    res = requests.get(search_url + search_term)
    # TODO: check status code etc...
    soup = BeautifulSoup(res.content, 'html.parser')
    vids = soup.findAll(attrs={'class': 'yt-uix-tile-link'})
    # BUG FIX: the pattern was a plain string ('/watch\?v=\w{11}') whose
    # backslash escapes are invalid string escapes (DeprecationWarning
    # today, an error in future Python). Use a raw string, compiled once
    # outside the loop.
    watch_re = re.compile(r'/watch\?v=\w{11}')
    for vid in vids:
        if not watch_re.match(vid['href']):
            LOG.debug('no media: ' + vid['href'])
            continue
        self.vid_url = vid['href']
        self.vid_name = vid.string
        self.stream_url = self.get_stream_url(self.vid_url)
        LOG.debug('Found stream URL: ' + self.vid_url)
        LOG.debug('Media title: ' + self.vid_name)
        self.mediaplayer.add_list([self.stream_url])
        self.audio_state = 'playing'
        self.speak_dialog('now.playing')
        wait_while_speaking()
        self.mediaplayer.play()
        return
    # We didn't find any playable results
    self.speak_dialog('not.found')
    wait_while_speaking()
    LOG.debug('Could not find any results with the query term: ' +
              search_term)
def handle_get_All_available_intent(self, message):
    """Rebuild the music library from SMB, local, and (if present) USB."""
    def _announce(keyword):
        # Tell the user which source is being (re)indexed.
        self.speak_dialog('update.library',
                          data={"source": str(keyword)},
                          expect_response=False)
        wait_while_speaking()

    def _rebuild(source_type, path):
        # Drop stale entries of this type, then merge a fresh scan.
        self.song_list = [i for i in self.song_list
                          if not (i['type'] == source_type)]
        self.song_list = self.merge_library(
            self.song_list, self.create_library(path, source_type))

    self.path = self.usbdevice.MountSMBPath(
        self.smb_path, self.smb_uname, self.smb_pass)
    _announce(message.data.get("MusicKeyword"))
    _rebuild('smb', self.path)
    LOG.info("SMB Mounted!")
    self.path = self.local_path
    _rebuild('local', self.path)
    LOG.info("Local Mounted!")
    if self.usbdevice.isDeviceConnected():
        device = self.usbdevice.getDevData()
        # mount the device and get the path
        self.path = self.usbdevice.getMountPathUsbDevice()
        _announce(message.data.get("USBKeyword"))
        _rebuild('usb', self.path)
def handle_random_intent(self, message):
    """Play a randomly chosen episode from the podcast RSS feed."""
    try:
        # Stop anything already playing
        self.stop()
        feeddata = feedparser.parse(self.url_rss)
        # BUG FIX: the index was drawn from a fixed 0-14 range, which
        # raises IndexError whenever the feed carries fewer than 15
        # entries. Clamp to the entries actually present.
        last_index = min(14, len(feeddata.entries) - 1)
        random_episode = random.randint(0, last_index)
        data = feeddata.entries[random_episode]
        url = data.enclosures[0]['url']
        LOG.info('random')
        LOG.info(random_episode)
        LOG.info(url)
        # After the intro, start the stream; prefer the audio service
        # when it is available.
        wait_while_speaking()
        if self.audioservice:
            LOG.info('AudioService')
            self.audioservice.play(url, message.data['utterance'])
        else:
            # othervice use normal mp3 playback
            LOG.info('playmp3')
            self.process = play_mp3(url)
    except Exception as e:
        LOG.error("Error: {0}".format(e))
def auto_alert_handler(self):
    """Fetch NOAA alerts for the zone and speak the urgent, severe ones.

    Only actual alert messages with Extreme/Severe severity, Observed
    certainty, and Immediate urgency are spoken. Speaking stops as soon
    as self.status is no longer "speaking".
    """
    self.log.info("auto_alert_handler")
    self._check_for_alerts(self.zone_id)
    filteredalerts = []
    for ap in self.alerts:
        # filter messages: only actual and severity/certainty/urgency
        # according to skill setting.
        # BUG FIX: the original used substring tests such as
        # `severity in "Extreme,Severe"`, which accept any substring of
        # that literal (e.g. "Severe," or even "e"); use explicit
        # membership/equality checks instead.
        if (ap['status'] == 'Actual'
                and ap['messageType'] == 'Alert'
                and ap['severity'] in ('Extreme', 'Severe')
                and ap['certainty'] == 'Observed'
                and ap['urgency'] == 'Immediate'):
            filteredalerts.append(ap)
    self.log.info("found alerts: {}".format(len(filteredalerts)))
    if filteredalerts:
        self.status = "speaking"
        self.speak_dialog("alerts.noaa")
        for alert in filteredalerts:
            headline = alert['headline']
            instruction = alert['instruction']
            wait_while_speaking()
            if self.status == "speaking":
                self.speak(headline)
            if self.status == "speaking":
                self.speak(instruction)
def speak(self, utterance, expect_response=False, wait=False, meta=None):
    """Speak a sentence.

    Arguments:
        utterance (str): sentence mycroft should speak
        expect_response (bool): set to True if Mycroft should listen for a
                                response immediately after speaking.
        wait (bool): set to True to block while the text is being spoken.
        meta: Information of what built the sentence.
    """
    # Register the skill as being active (mutates the caller's meta dict
    # in place, as the original did).
    meta = meta or {}
    meta['skill'] = self.name
    self.enclosure.register(self.name)
    payload = {
        'utterance': utterance,
        'expect_response': expect_response,
        'meta': meta,
    }
    # Forward the originating message when one exists so its context
    # (e.g. routing data) is preserved; otherwise emit a fresh message.
    source = dig_for_message()
    if source:
        self.bus.emit(source.forward("speak", payload))
    else:
        self.bus.emit(Message("speak", payload))
    if wait:
        wait_while_speaking()
def tell_story(self, url, bookmark):
    """Read a fairy tale aloud from *bookmark*, persisting progress."""
    self.is_reading = True
    self.settings['bookmark'] = bookmark
    if bookmark == 0:
        # Fresh start: introduce the story first.
        title = self.get_title(url)
        author = self.get_author(url)
        self.speak_dialog('title_by_author',
                          data={'title': title, 'author': author})
        time.sleep(1)
    for paragraph in self.get_story(url)[bookmark:]:
        self.settings['bookmark'] += 1
        time.sleep(.5)
        # Stop requests flip is_reading; bail out between paragraphs...
        if not self.is_reading:
            break
        for sentence in paragraph.split('. '):
            # ...and between sentences.
            if not self.is_reading:
                break
            wait_while_speaking()
            self.speak(sentence, wait=True)
    if self.is_reading:
        # Finished naturally: clear the reading state and credit the source.
        self.is_reading = False
        self.settings['bookmark'] = 0
        self.settings['story'] = None
        time.sleep(2)
        self.speak_dialog('from_fairytalez')
def mesure_mic_thresh(self, message):
    """Sample the listener's energy threshold and keep a rolling average.

    Reads "Energy: cur=... thresh=..." lines from self.filename, keeps
    the most recent 120 threshold readings, and tracks the lowest and
    highest running averages seen in settings.
    """
    if self.autovolume and not self.audio_service.is_playing:
        wait_while_speaking()
        with io.open(self.filename, 'r') as fh:
            while True:
                line = fh.readline()
                if line == "":
                    break
                # Ex:Energy: cur=4 thresh=1.5
                parts = line.split("=")
                meter_thresh = float(parts[-1])
                self.meter_thresh_list.append(meter_thresh)
                if len(self.meter_thresh_list) > 120:
                    # BUG FIX: was pop(1), which kept the very first
                    # sample in the window forever; drop the oldest
                    # entry instead.
                    self.meter_thresh_list.pop(0)
                self.meter_thresh = sum(self.meter_thresh_list) / float(
                    len(self.meter_thresh_list))
                if self.meter_thresh < self.settings.get(
                        'Lowest messurement'):
                    self.settings['Lowest messurement'] = self.meter_thresh
                if self.meter_thresh > self.settings.get(
                        'Highest messurement'):
                    self.settings[
                        'Highest messurement'] = self.meter_thresh
def auto_set_volume(self, message):
    """Adjust output volume based on the measured ambient mic threshold."""
    if not self.autovolume or self.audio_service.is_playing:
        return
    wait_while_speaking()
    volume = int(self.settings.get('Normal volume'))
    highest = self.settings.get('Highest messurement')
    lowest = self.settings.get('Lowest messurement')
    # NOTE: the original local was named `range`, shadowing the builtin.
    span = highest - lowest
    # Trigger levels sit 10% of the span inside each extreme.
    high_level = highest - ((10 * span) / 100)
    low_level = lowest + ((10 * span) / 100)
    if self.meter_thresh > high_level:
        volume = self.settings.get('High volume')
    if self.meter_thresh < low_level:
        volume = self.settings.get('Low volume')
    if volume != self.volume and volume is not None:
        self.mixer.setvolume(int(volume))
        self.volume = volume
        self.log.info("Mic thresh: " + str(self.meter_thresh) +
                      " Low level: " + str(low_level) +
                      " High level: " + str(high_level))
        self.log.info("Setting volume to :" + str(volume) + "%")
def after_scenario(context, scenario):
    """Wait for mycroft completion and reset any changed state."""
    # TODO wait for skill handler complete
    wait_while_speaking()
    context.bus.clear_all_messages()
    context.matched_message = None
    # Restore the default step timeout of 10 seconds.
    context.step_timeout = 10
def handle_go_to_sleep(self, message):
    """Send a message to the speech client setting the listener into a
    sleep mode, then dim the eyes and mute the volume on real hardware.
    """
    self.speak_dialog("going.to.sleep")
    self.emitter.emit(Message('recognizer_loop:sleep'))
    self.sleeping = True
    wait_while_speaking()
    time.sleep(2)
    wait_while_speaking()
    self.enclosure.eyes_narrow()
    # Dim and look downward to 'go to sleep'
    # TODO: Get current brightness from somewhere
    self.old_brightness = 30
    # BUG FIX: range() needs an int; '/' yields a float on Python 3 and
    # raises TypeError. Use integer division (the newer version of this
    # handler already does).
    for i in range(0, (self.old_brightness - 10) // 2):
        self.enclosure.eyes_brightness(self.old_brightness - i * 2)
        time.sleep(0.1)
    time.sleep(0.5)  # gives the brightness command time to finish
    self.enclosure.eyes_look("d")
    if self.config_core.get("enclosure").get("platform",
                                             "unknown") != "unknown":
        self.emitter.emit(
            Message('mycroft.volume.mute', data={"speak_message": False}))
def handle_go_to_sleep(self, message):
    """Put the listener into sleep mode.

    If the user has already been told about the waking-up process five
    times, a shorter confirmation is spoken instead.
    """
    count = self.settings.get('Wake up count', 0) + 1
    self.settings['Wake up count'] = count
    if count <= 5:
        self.speak_dialog('going.to.sleep', {'wake_word': self.wake_word})
    else:
        self.speak_dialog('going.to.sleep.short')
    self.bus.emit(Message('recognizer_loop:sleep'))
    self.sleeping = True
    self.started_by_skill = True
    wait_while_speaking()
    time.sleep(2)
    wait_while_speaking()
    # Dim and look downward to 'go to sleep'
    # TODO: Get current brightness from somewhere
    self.old_brightness = 30
    steps = (self.old_brightness - 10) // 2
    for step in range(steps):
        self.enclosure.eyes_brightness(self.old_brightness - step * 2)
        time.sleep(0.15)
    self.enclosure.eyes_look("d")
    platform = self.config_core.get("enclosure").get("platform", "unknown")
    if platform != "unknown":
        self.bus.emit(
            Message('mycroft.volume.mute', data={"speak_message": False}))
def handle_whitenoise_time(self, message):
    """Play white noise for the duration requested in the utterance."""
    utterance = message.data.get('utterance')
    # Calculate how long to record
    self.start_time = now_local()
    extracted = extract_datetime(utterance, lang=self.lang)
    # BUG FIX: extract_datetime returns None when the utterance has no
    # parseable time, which crashed the tuple unpacking; fall through to
    # the default duration instead.
    if extracted is not None:
        stop_time, _ = extracted
        self.settings["duration"] = (stop_time -
                                     self.start_time).total_seconds()
    else:
        self.settings["duration"] = 0
    if self.settings["duration"] <= 0:
        self.settings["duration"] = 60  # default recording duration
    # Initiate white noise
    path = random.choice(self.play_list_all)
    try:
        # NOTE(review): nice_duration is called with the skill instance as
        # the first argument — confirm this matches the helper's signature.
        time_for = nice_duration(self, self.settings["duration"],
                                 lang=self.lang)
        self.speak_dialog('whitenoise.response.time',
                          {'duration': time_for})
        wait_while_speaking()
        self.audioservice.play(path)
        # self.process = play_mp3(self.play_list_all[0])
        self.enclosure.eyes_color(255, 0, 0)  # set color red
        self.last_index = 24
        self.schedule_repeating_event(self.recording_feedback, None, 1,
                                      name='RecordingFeedback')
    except Exception as e:
        self.log.error("Error: {0}".format(e))
def handle_query_intent(self, message):
    """Report unacquired (and optionally unwatched) episode counts."""
    if not self.isConfigured():
        return
    self.speak_dialog("querying")
    feedData, epData = self.getUnacquired()
    if self.settings.get("useWatched"):
        feedData['type'] = "unwatched"
    if feedData['total'] == 0:
        self.speak_dialog('noNewEpisodes', data=feedData)
        return
    # Mention today's airings when there are any.
    dialog = ('newEpisodesWithAiringToday' if feedData['airingToday'] > 0
              else 'newEpisodes')
    self.speak_dialog(dialog, data=feedData)
    self.speakEpisodesDetails(epData)
    wait_while_speaking()
    if not self.settings.get("useWatched"):
        feedData, _ = self.getUnwatched()
        if feedData['total'] > 0:
            self.speak_dialog("unwatchedEpisodes", data=feedData)
def find_station(self, search_term, utterance=None):
    """Query the TuneIn API and play the first available audio station."""
    # get the response from the TuneIn API
    res = requests.post(base_url, data={"query": search_term},
                        headers=headers)
    dom = parseString(res.text)
    # results are each in their own <outline> tag as defined by OPML
    # (https://en.wikipedia.org/wiki/OPML)
    for entry in dom.getElementsByTagName("outline"):
        # Only consider audio-typed station outlines; skip entries that
        # are marked as unavailable.
        is_station = (entry.getAttribute("type") == "audio"
                      and entry.getAttribute("item") == "station")
        if not is_station or entry.getAttribute("key") == "unavailable":
            continue
        self.mpeg_url = entry.getAttribute("URL")
        self.station_name = entry.getAttribute("text")
        # this URL will return audio/x-mpegurl data. This is just a list
        # of URLs to the real streams
        self.stream_url = self.get_stream_url(self.mpeg_url)
        LOG.debug("Found stream URL: " + self.stream_url)
        self.audio_service = AudioService(self.bus)
        self.speak_dialog("now.playing", {"station": self.station_name})
        wait_while_speaking()
        self.audio_service.play(self.stream_url, utterance)
        return
    # We didn't find any playable stations
    self.speak_dialog("not.found")
    wait_while_speaking()
    LOG.debug("Could not find a station with the query term: " +
              search_term)
def clear_queue_and_play(self, playlist_items, playlist_type):
    """Clear the Kodi playlist, queue *playlist_items*, and start playback.

    Args:
        playlist_items: items to add to the freshly cleared playlist.
        playlist_type: Kodi playlist type passed to the playlist helpers
            and spoken back to the user in the now.playing dialog.

    NOTE(review): indentation reconstructed from collapsed source — the
    steps are read as sequential, each checking its own "OK" response;
    confirm against the original skill layout.
    """
    result = None
    try:
        # Step 1: clear any existing playlist of this type.
        result = playlist_clear(self.kodi_path, playlist_type)
        if "OK" in result.text:
            result = None
            LOG.info("Clear Playlist Successful")
        # Step 2: queue the requested items.
        result = create_playlist(self.kodi_path, playlist_items,
                                 playlist_type)
        if "OK" in result.text:
            result = None
            LOG.info("Add Playlist Successful")
        # Step 3: announce, then start playback.
        wait_while_speaking()
        self.speak_dialog("now.playing",
                          data={"result_type": str(playlist_type)},
                          expect_response=False)
        time.sleep(2)  # wait for playlist before playback
        result = play_normal(self.kodi_path, playlist_type)
        if "OK" in result.text:
            LOG.info("Now Playing..." + str(result.text))
            result = None
    except Exception as e:
        # Kodi connection details may have changed; re-read web settings.
        LOG.info('An error was detected in: clear_queue_and_play')
        LOG.error(e)
        self.on_websettings_changed()
def handle_mymillenials_intent(self, message):
    """Play the latest No Agenda episode from the RSS feed."""
    try:
        # Stop anything already playing
        self.stop()
        self.speak_dialog('NoAgenda')
        feed = feedparser.parse(self.url_rss)
        latest = feed.entries[0]
        url = latest.enclosures[0]['url']
        LOG.info('mymillenials')
        LOG.info(url)
        # After the intro, start the no agenda stream; prefer the audio
        # service when available.
        sleep(1.0)
        wait_while_speaking()
        if self.audioservice:
            LOG.info('AudioService')
            self.audioservice.play(url, message.data['utterance'])
        else:
            # othervice use normal mp3 playback
            LOG.info('playmp3')
            self.process = play_mp3(url)
    except Exception as e:
        LOG.error("Error: {0}".format(e))
def handle_move_cursor_intent(
        self, message):  # a request was made to move the kodi cursor
    """
    This routine will move the kodi cursor
    Context is set so the user only has to say the direction on future
    navigation
    """
    self.set_context('MoveKeyword', 'move')
    self.set_context('CursorKeyword', 'cursor')
    # The mapped values are the english words required by the kodi api.
    # Insertion order matters: a later matching keyword overrides an
    # earlier one, exactly like the original chain of ifs.
    keyword_map = {
        "UpKeyword": "Up",
        "DownKeyword": "Down",
        "LeftKeyword": "Left",
        "RightKeyword": "Right",
        "EnterKeyword": "Enter",
        "SelectKeyword": "Select",
        "BackKeyword": "Back",
    }
    direction_kw = None
    for keyword, direction in keyword_map.items():
        if keyword in message.data:
            direction_kw = direction
    repeat_count = self.get_repeat_words(message.data.get('utterance'))
    if direction_kw:
        for _ in range(0, int(repeat_count)):
            response = move_cursor(self.kodi_path, direction_kw)
            if "OK" in response.text:
                wait_while_speaking()
                self.speak_dialog("direction",
                                  data={"result": direction_kw},
                                  expect_response=True)
def handle_custom_eye_color(self, message):
    """Conversationally collect R, G and B values and set a custom eye
    color. Aborts silently if any of the three prompts is cancelled.
    """
    def is_byte(utt):
        # Accept only integers in the 0-255 byte range.
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; catch conversion errors only.
        try:
            return 0 <= int(utt) <= 255
        except (ValueError, TypeError):
            return False
    self.speak_dialog('set.custom.color')
    wait_while_speaking()
    r = self.get_response('get.r.value', validator=is_byte,
                          on_fail="error.rgbvalue", num_retries=2)
    if not r:
        return  # cancelled
    g = self.get_response('get.g.value', validator=is_byte,
                          on_fail="error.rgbvalue", num_retries=2)
    if not g:
        return  # cancelled
    b = self.get_response('get.b.value', validator=is_byte,
                          on_fail="error.rgbvalue", num_retries=2)
    if not b:
        return  # cancelled
    # NOTE(review): r/g/b are the raw utterance strings, as in the
    # original — confirm set_eye_color accepts string components.
    custom_rgb = [r, g, b]
    self.set_eye_color(rgb=custom_rgb)
def muteHandler(self, message):
    """Mute the mixer while speech output runs, then unmute and detach.

    Fires on 'recognizer_loop:audio_output_start' when the skill's mute
    setting is enabled.
    """
    # Removed unused `global speak_tele` declaration — the name was never
    # read or assigned in this handler.
    if (self.mute == 'true') or (self.mute == 'True'):
        self.mixer.setmute(1)
        wait_while_speaking()
        self.mixer.setmute(0)
        # NOTE(review): reconstructed as inside the if (collapsed source
        # loses indentation) — confirm whether the handler should also be
        # removed when muting is disabled.
        self.remove_event('recognizer_loop:audio_output_start')
def handle_tafseer_intent(self, message):
    """Speak the tafseer (interpretation) of a requested or random surah."""
    article = message.data.get('surah')
    if article is None:
        surah = str(random.choice(range(1, 114)))
    else:
        try:
            surah = str(utils.surahs.index(article) + 1)
        except ValueError:
            # Unknown surah name: fall back to a random one.
            surah = str(random.choice(range(1, 114)))
    # Audio (recitation) data.
    # NOTE: the original bound these responses to a local named `json`,
    # shadowing the module; renamed here.
    url = "http://api.alquran.cloud/v1/surah/" + surah + "/ar.alafasy"
    response = utils.json_from_url(url)
    path_surah = utils.parse_surah(response)
    # Tafseer
    # url="http://api.alquran.cloud/v1/surah/"+surah+"/editions/ar.muyassar"
    url = ("http://api.alquran.cloud/v1/surah/" + surah +
           "/editions/ar.jalalayn")
    response = utils.json_from_url(url)
    path_tafseer = utils.parse_tafseer(response)
    try:
        for verse_text in path_tafseer:
            # self.audioservice.play(path_surah[ayah])
            # wait_while_speaking()
            self.speak(verse_text)
            wait_while_speaking()
    except Exception as e:
        self.log.error("Error: {0}".format(e))
def process(self, data):
    """Dispatch a raw serial-bus string from the faceplate to bus events.

    Each recognized substring in *data* triggers the matching messagebus
    emission: volume changes, mic/system tests, shutdown/reboot, wifi
    setup, factory reset, ssh toggles and learning (opt_in) toggles.
    Several commands can appear in one string; each `if` is independent.
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.bus.emit(Message(data))
    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.bus.emit(Message("enclosure.started"))
    if "mycroft.stop" in data:
        # Top-button press: only acts once the unit has been paired.
        if has_been_paired():
            create_signal('buttonPress')
            self.bus.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.bus.emit(Message("mycroft.volume.increase",
                              {'play_sound': True}))
    if "volume.down" in data:
        self.bus.emit(Message("mycroft.volume.decrease",
                              {'play_sound': True}))
    if "system.test.begin" in data:
        self.bus.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.bus.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Record a short sample at a fixed volume and play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.bus.emit(Message("speak", {
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        # Eyes to soft gray on shutdown
        self.bus.emit(Message("enclosure.eyes.color",
                              {'r': 70, 'g': 65, 'b': 69}))
        self.bus.emit(
            Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.bus.emit(Message("system.shutdown"))
    if "unit.reboot" in data:
        # Eyes to soft gray on reboot
        self.bus.emit(Message("enclosure.eyes.color",
                              {'r': 70, 'g': 65, 'b': 69}))
        self.bus.emit(Message("enclosure.eyes.spin"))
        self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.bus.emit(Message("system.reboot"))
    if "unit.setwifi" in data:
        self.bus.emit(Message("system.wifi.setup", {'lang': self.lang}))
    if "unit.factory-reset" in data:
        # Announce, wipe the device identity, reset wifi and disable ssh,
        # then reboot once speech output finishes.
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.bus.emit(Message("system.wifi.reset"))
        self.bus.emit(Message("system.ssh.disable"))
        wait_while_speaking()
        self.bus.emit(Message("enclosure.mouth.reset"))
        self.bus.emit(Message("enclosure.eyes.spin"))
        self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(5)  # give the system time to process all messages
        self.bus.emit(Message("system.reboot"))
    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.bus.emit(Message("system.ssh.enable"))
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))
    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.bus.emit(Message("system.ssh.disable"))
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))
    if "unit.enable-learning" in data or "unit.disable-learning" in data:
        # Persist the opt_in flag in the user's local configuration.
        enable = 'enable' in data
        word = 'enabled' if enable else 'disabled'
        LOG.info("Setting opt_in to: " + word)
        new_config = {'opt_in': enable}
        user_config = LocalConf(USER_CONFIG)
        user_config.merge(new_config)
        user_config.store()
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("learning " + word)}))