def process(self, data):
    """Dispatch a raw serial-bus string from the enclosure to bus messages.

    Commands are detected by substring match, so a single payload can
    trigger several branches; the ordering inside the mic test matters.
    """
    # Forward the raw data verbatim so other components can react to it.
    self.client.emit(Message(data))
    if "mycroft.stop" in data:
        self.client.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.client.emit(
            Message("IncreaseVolumeIntent", metadata={'play_sound': True}))
    if "volume.down" in data:
        self.client.emit(
            Message("DecreaseVolumeIntent", metadata={'play_sound': True}))
    if "system.test.begin" in data:
        self.client.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.client.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck the volume, speak a prompt, record 3 s, play back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.client.emit(Message("speak", metadata={
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav")
        time.sleep(3.5)  # Pause between tests so it's not so fast
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
def handle_sounds_animal(self, message):
    """Play the sound clip for the requested animal, or admit ignorance."""
    requested = message.data.get('animal')
    clip = self.sound_file_path(requested)
    if not clip:
        # No clip on disk for this animal -> fall back to a spoken apology.
        self.speak_dialog('not.sure', data={'animal': requested})
        return
    play_wav(clip)
def __play(self, req):
    """Save and play the audio carried by a completed request future."""
    response = req.result()
    if response.status_code != 200:
        # %-style message kept verbatim so log parsing stays stable.
        LOGGER.error('%s Http Error: %s for url: %s' %
                     (response.status_code, response.reason, response.url))
        return
    self.__save(response.content)
    play_wav(self.filename)
def listen(self, source, emitter, stream=None):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource): Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
            begins and ends.
        stream (AudioStreamHandler): Stream target that will receive chunks
            of the utterance audio while it is being recorded

    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    # bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection. This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    # speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    LOG.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)
    if self._stop_signaled:
        # Shutdown was requested while waiting; abort without recording.
        return

    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.config.get('confirm_listening'):
        audio_file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if audio_file:
            # Mute the mic so the confirmation beep is not recorded.
            source.mute()
            play_wav(audio_file).wait()
            source.unmute()

    frame_data = self._record_phrase(source, sec_per_buffer, stream)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # Persist the captured utterance to /tmp for later inspection.
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")

    return audio_data
def process(self, data):
    """Translate a raw serial-bus string into messagebus events.

    Commands are detected by substring match, so one payload may trigger
    several branches.
    """
    # Forward the raw data verbatim for any other interested listeners.
    self.client.emit(Message(data))
    if "mycroft.stop" in data:
        self.client.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.client.emit(
            Message("IncreaseVolumeIntent", metadata={'play_sound': True}))
    if "volume.down" in data:
        self.client.emit(
            Message("DecreaseVolumeIntent", metadata={'play_sound': True}))
    if "system.test.begin" in data:
        self.client.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.client.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck volume, speak a prompt, record and play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.client.emit(Message("speak", metadata={
            'utterance': "I am testing one two three"}))
        record("/tmp/test.wav", 3.5)
        play_wav("/tmp/test.wav")
        # Test audio muting on arduino
        self.client.emit(Message("speak", metadata={
            'utterance': "LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG"}))
        mixer.setvolume(prev_vol)
def main():
    """CLI utility: record audio from the default mic, then play it back."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    args = parser.parse_args()

    rule = " ==========================================================="
    print(rule)
    print(" == STARTING TO RECORD, MAKE SOME NOISE! ==")
    print(rule)
    record(args.filename, args.duration)
    print(rule)
    print(" == DONE RECORDING, PLAYING BACK... ==")
    print(rule)
    play_wav(args.filename)
def ex_bell(self):
    """Signal an expired reminder: tint the eyes, show an alarm-clock
    image on the GUI and (if enabled in settings) play the reminder ping.
    """
    self.enclosure.eyes_color(253, 158, 102)
    self.log.info("show picture")
    self.gui.show_image(abspath(dirname(__file__)) + "/alarm-clock.jpg",
                        fill='PreserveAspectFit')
    # .get() avoids a KeyError when the 'sound' setting has never been saved.
    if self.settings.get("sound"):
        play_wav(REMINDER_PING)
    self.log.info("notification available")  # fixed typo 'notifacation'
def process(self, data):
    """Map raw serial-bus strings from the Arduino to messagebus actions.

    Substring matching means one payload may trigger several branches;
    some branches shell out to system commands (shutdown, reboot, reset).
    """
    # Forward the raw data verbatim so other components can react to it.
    self.client.emit(Message(data))
    if "mycroft.stop" in data:
        self.client.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.client.emit(
            Message("IncreaseVolumeIntent", metadata={'play_sound': True}))
    if "volume.down" in data:
        self.client.emit(
            Message("DecreaseVolumeIntent", metadata={'play_sound': True}))
    if "system.test.begin" in data:
        self.client.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.client.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck volume, speak a prompt, record 3 s, play back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.client.emit(Message("speak", metadata={
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav")
        time.sleep(3.5)  # Pause between tests so it's not so fast
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        self.client.emit(
            Message("enclosure.eyes.timedspin", metadata={'length': 12000}))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)
    if "unit.reboot" in data:
        self.client.emit(
            Message("enclosure.eyes.spin"))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
    if "unit.setwifi" in data:
        self.client.emit(Message("wifisetup.start"))
    if "unit.factory-reset" in data:
        # Wipe the pairing identity, then reboot to re-enter setup.
        subprocess.call(
            'rm ~/.mycroft/identity/identity.json', shell=True)
        self.client.emit(
            Message("enclosure.eyes.spin"))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def take_break(self):
    """Announce a break, sleep through it, chime, then resume studying."""
    # 5. Communicate start of break
    self.speak_dialog('break.start')
    break_seconds = 60 * self.break_duration
    time.sleep(break_seconds)  # sleep for duration of break (blocks thread)
    play_wav(self.sound_file)
    time.sleep(3)
    # communicate end of break
    self.speak_dialog('break.end')
    # start studying again (go back to 3.)
    self.start_study_timer()
def main():
    """Record from the mic for the requested duration, then play it back."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    options = parser.parse_args()
    # Capture first, then play the capture straight back for checking.
    record(options.filename, options.duration)
    play_wav(options.filename)
def __communicate_volume_change(self, message, dialog, code, changed):
    """Give audible or spoken feedback after a volume-change request.

    NOTE(review): the `dialog` parameter is accepted but never used here;
    only the hard-coded 'already.max.volume' dialog is ever spoken —
    confirm whether that is intentional.
    """
    play_sound = message.data.get('play_sound', False)
    if play_sound:
        # Audible cue only when the level actually moved.
        if changed:
            play_wav(self.volume_sound)
    else:
        # Only complain when nothing changed and the volume is non-zero.
        if (not changed) and (code != 0):
            self.speak_dialog('already.max.volume', data={'volume': code})
def handle_sodexo_place_intent(self, message):
    """Play a camera-shutter sound and report available restaurant places.

    The live scrape of sodexo-restauration.moneweb.fr was disabled; a
    hard-coded placeholder count is spoken instead. (Dead commented-out
    scraping code removed.)
    """
    play_wav(self.shutter_sound)
    places = "Currently 320 places available"
    self.speak(places)
def handle_dukenukem_intent(self, message):
    """Play a random Duke Nukem .wav from the skill's sounds directory,
    or speak an error dialog when none are found.
    """
    path = dirname(__file__) + "/sounds/dukenukem"
    # endswith() avoids matching names like 'clip.wav.bak' that the old
    # '".wav" in wav' substring test would have accepted.
    files = [wav for wav in listdir(path) if wav.endswith(".wav")]
    if files:
        play_wav(path + "/" + random.choice(files))
    else:
        self.speak_dialog("bad_file")
def communicate_volume_change(self, message, dialog, code, changed):
    """Confirm a volume change with a sound cue or a spoken dialog."""
    wants_sound = message.data.get('play_sound', False)
    if wants_sound:
        # Audible cue only when the level actually moved.
        if changed:
            play_wav(self.volume_sound)
        return
    if not changed:
        dialog = 'already.max.volume'
    self.speak_dialog(dialog, data={'volume': code})
def process(self, data):
    """Dispatch raw serial-bus strings from the enclosure to the websocket bus.

    Matches commands by substring; several branches shell out to system
    commands (shutdown, reboot, factory reset).
    """
    # Forward the raw data verbatim for any other interested listeners.
    self.ws.emit(Message(data))
    if "Command: system.version" in data:
        self.ws.emit(Message("enclosure.start"))
    if "mycroft.stop" in data:
        create_signal('buttonPress')  # FIXME - Must use WS instead
        self.ws.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.ws.emit(Message("IncreaseVolumeIntent", {'play_sound': True}))
    if "volume.down" in data:
        self.ws.emit(Message("DecreaseVolumeIntent", {'play_sound': True}))
    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck volume, speak a prompt, record 3 s, play back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(
            Message("speak", {'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        self.ws.emit(Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)
    if "unit.reboot" in data:
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))
    if "unit.factory-reset" in data:
        # Wipe the pairing identity so the unit re-pairs after reboot.
        subprocess.call('rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def __play(self, req):
    """Play the wav from a finished request future, blocking until done."""
    response = req.result()
    if response.status_code == 200:
        self.__save(response.content)
        LOGGER.info("playing wav from response")
        # communicate() blocks until the player subprocess exits.
        play_wav(self.filename).communicate()
    else:
        LOGGER.error('%s Http Error: %s for url: %s' %
                     (response.status_code, response.reason, response.url))
def listen(self, source, emitter):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource): Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
            begins and ends.

    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    # bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection. This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    # speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    LOG.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)
    if self._stop_signaled:
        # Shutdown was requested while waiting; abort without recording.
        return

    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.config.get('confirm_listening'):
        audio_file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if audio_file:
            # Mute the mic so the confirmation beep is not recorded.
            source.mute()
            play_wav(audio_file).wait()
            source.unmute()

    frame_data = self._record_phrase(source, sec_per_buffer)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # Persist the captured utterance to /tmp for later inspection.
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")

    return audio_data
def play_dialog(self, sound_dialog, speak_dialog_name, data):
    """Prefer a canned sound file over spoken dialog when sounds are enabled.

    NOTE(review): speak_dialog_enabled is compared against the string
    "True", so it is evidently stored as text, not a bool — confirm.
    """
    if self.speak_dialog_enabled == "True" or not sound_dialog:
        self.speak_dialog(speak_dialog_name, data=data)
    else:
        path = join(abspath(dirname(__file__)), "sounds", sound_dialog)
        if isfile(path):
            play_wav(path)
        else:
            # Sound file missing on disk -> fall back to spoken dialog.
            self.speak_dialog(speak_dialog_name, data=data)
def handle_record_end():
    """Emit record_end on the bus, optionally after an audible end cue."""
    logger.info("End Recording...")
    if config.get('confirm_listening'):
        # 'sound' avoids shadowing the builtin 'file' name.
        sound = resolve_resource_file(
            config.get('sounds').get('end_listening'))
        if sound:
            play_wav(sound)
    ws.emit(Message('recognizer_loop:record_end'))
def communicate_volume_change(self, message, dialog, code, changed):
    """Confirm a volume change with a sound cue or a spoken dialog."""
    if message.metadata.get('play_sound', False):
        # Audible cue only when the level actually moved.
        if changed:
            play_wav(self.volume_sound)
        return
    if not changed:
        dialog = 'already.max.volume'
    self.speak_dialog(dialog, data={'volume': code})
def mute_and_confirm_listening(self, source):
    """Play the start-listening cue with the mic muted.

    Returns True when the cue was played, False when no cue file exists.
    """
    audio_file = resolve_resource_file(
        self.config.get('sounds').get('start_listening'))
    if not audio_file:
        return False
    # Mute so the confirmation beep is not picked up by the recognizer.
    source.mute()
    play_wav(audio_file).wait()
    source.unmute()
    return True
def take_picture_intent(self, message):
    """Warn the user, click the shutter, capture a webcam photo, confirm."""
    # It takes a moment for the command to be processed, so prompt first.
    self.speak_dialog("ready")
    wait_while_speaking()
    # Play the shutter sound
    play_wav(self.shutter_sound)
    # Take the photo via fswebcam
    camera = Sultan()
    camera.fswebcam("-r 640x480 --no-banner ~/webcam/image.jpg").run()
    # Comment on having taken the photo
    self.speak_dialog("picture")
def start_study_timer(self):
    """Sleep through one study interval, chime, then end session or break."""
    # 3. Sleep for the length of the study interval (blocks this thread).
    time.sleep(60 * self.study_duration)
    # 4. Check if end of session
    self.interval_counter += 1
    play_wav(self.sound_file)
    time.sleep(3)
    if self.interval_counter > self.session_length:
        # intervals exhausted -> take a break
        self.take_break()
    else:
        # still within the session -> end it
        self.end_session()
def main():
    """Record a clip from the default microphone, then play it back."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    opts = parser.parse_args()
    # Capture first, then immediately play the capture back for checking.
    record(opts.filename, opts.duration)
    play_wav(opts.filename)
def _play(self, req):
    """Play wav file after saving to tmp.

    Args:
        req (object): completed requests response object
    """
    if req.status_code != 200:
        LOG.error('%s Http Error: %s for url: %s' %
                  (req.status_code, req.reason, req.url))
        return
    self._save(req.content)
    # communicate() waits for the audio player process to finish.
    play_wav(self.filename).communicate()
def handle_record_begin(self):
    """Confirm recording start, hand off to the local listener, notify bus."""
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.audioconfig.get('confirm_listening'):
        sound = resolve_resource_file(
            self.audioconfig.get('sounds').get('start_listening'))
        if sound:
            play_wav(sound)
    LOG.info("deactivating speech recognition")
    # Put the main recognizer to sleep and start the local listener.
    self.emit("recognizer_loop:sleep")
    self.emit("recognizer_loop:local_listener.start")
    self.emit('recognizer_loop:record_begin')
def handle_record_begin():
    """Emit record_begin on the bus, optionally preceded by an audible cue."""
    logger.info("Begin Recording...")
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if config.get('confirm_listening'):
        # 'sound' avoids shadowing the builtin 'file' name.
        sound = resolve_resource_file(
            config.get('sounds').get('start_listening'))
        if sound:
            play_wav(sound)
    ws.emit(Message('recognizer_loop:record_begin'))
def handle_take_picture(self, message):
    """Snap the last camera frame to disk, mail it and confirm verbally.

    Plays a shutter sound first when configured and the file exists.
    """
    sound_path = self.settings["camera_sound_path"]
    if exists(sound_path) and self.settings["play_sound"]:
        # endswith() is stricter than the old substring check, which also
        # matched names like 'shutter.wav.old'.
        if sound_path.endswith(".wav"):
            play_wav(sound_path)
        elif sound_path.endswith(".mp3"):
            play_mp3(sound_path)
    pic_path = join(self.settings["picture_path"], time.asctime() + ".jpg")
    cv2.imwrite(pic_path, self.last_frame)
    self.mail_picture(pic_path)
    self.speak_dialog("picture")
def train(self, message=None):
    """Train the intent container, then signal training completion.

    Args:
        message: optional bus message; `single_thread` in its data forces
            single-threaded training.
    """
    # Guard: the default message=None previously crashed on message.data.
    single_thread = message.data.get('single_thread', False) if message \
        else False
    self.finished_training_event.clear()
    LOG.info('Training...')
    self.container.train(single_thread=single_thread)
    LOG.info('Training complete.')
    self.load_config = Configuration.get()
    # resolve_resource_file() returns None when the resource is missing;
    # guard so a missing sound cannot crash the training thread.
    file = resolve_resource_file("snd/wellcome.WAV")
    if file:
        play_wav(file)
    self.finished_training_event.set()
    self.finished_initial_train = True
def _play(self, req):
    """Play wav file after saving to tmp.

    Args:
        req (object): completed requests response object
    """
    ok = req.status_code == 200
    if ok:
        self._save(req.content)
        # communicate() waits for the audio player process to finish.
        play_wav(self.filename).communicate()
    else:
        LOG.error(
            '%s Http Error: %s for url: %s' %
            (req.status_code, req.reason, req.url))
def handle_single_whitenoise(self, message):
    """Play one requested (or random) sound from the library and arm the
    replay timer so the clip loops until stopped.

    Reads the clip's duration from its wav header to know when to replay.
    """
    print("inside handler")
    wait_while_speaking()
    self.stopped = False
    now = datetime.now()
    print(message.data.get('sound'))
    if message.data.get('sound') is not None:
        print("inside not None")
        title = message.data.get('sound')
        # Fuzzy-match the requested title against the play list.
        score = match_one(title, self.play_list)
        print(score)
        if score[1] > 0.5:
            self.process = play_wav(score[0])
            fname = score[0]
            # Loop Infinitely
            with contextlib.closing(wave.open(fname, 'r')) as f:
                # Duration = frame count / frame rate (seconds).
                frames = f.getnframes()
                rate = f.getframerate()
                duration = frames / float(rate)
                self.audio_length = duration
                print(duration)
            self.songTimer = {
                "file": fname,
                "expires": now + timedelta(seconds=self.audio_length)
            }
            self.check_replay(None)
        else:
            self.speak('Sorry I could not find that sound in my library')
            return None
    else:
        print("inside None")
        # No specific request: pick a random clip from the library.
        sound_file = list(self.play_list.values())
        sound_file = random.choice(sound_file)
        print(sound_file)
        # if os.path.isfile(sound_file):
        wait_while_speaking()
        self.process = play_wav(sound_file)
        # Loop Infinitely
        fname = sound_file
        with contextlib.closing(wave.open(fname, 'r')) as f:
            # Duration = frame count / frame rate (seconds).
            frames = f.getnframes()
            rate = f.getframerate()
            duration = frames / float(rate)
            self.audio_length = duration
            print(duration)
        self.songTimer = {
            "file": fname,
            "expires": now + timedelta(seconds=self.audio_length)
        }
        self.check_replay(None)
def __check_reminder(self, message):
    """ Repeating event handler. Checking if a reminder time has been
        reached and presents the reminder. """
    now = now_local()
    handled_reminders = []
    for r in self.settings.get('reminders', []):
        # r is (text, serialized_datetime)
        dt = deserialize(r[1])
        if now > dt:
            # Reminder time reached: ping, speak it, mark for removal.
            play_wav(REMINDER_PING)
            self.speak_dialog('Reminding', data={'reminder': r[0]})
            handled_reminders.append(r)
        if now > dt - timedelta(minutes=10):
            # NOTE(review): this fires on every pass once inside the
            # 10-minute window -- confirm add_notification de-duplicates.
            self.add_notification(r[0], r[0], dt)
    self.remove_handled(handled_reminders)
def keypad_callback(self, key):
    """Debounce keypad presses and invoke the callback mapped to `key`."""
    print(key)
    if (self.last_msg_time + sleep_time) >= time.time():
        # Within the debounce window -> drop this press.
        LOG.warning('Ignoring')
        return
    self.last_msg_time = time.time()
    callbacks = self.get_callbacks()
    if callbacks[key] is None:
        LOG.warning('NOT DEFINED')
        return
    click_file = FilePathManager.resolve('/resources/click.wav')
    play_wav(click_file)
    callbacks[key]()
def notify(self, repeat=42):
    """ recursively calls it's self to play alarm mp3

    Args:
        repeat (int): number of times it'll call itself
    """
    if hasattr(self, 'notify_event_name'):
        # Cancel any previously scheduled repeat before starting anew.
        self.cancel_scheduled_event(self.notify_event_name)
    self.allow_notify = True
    path = join(abspath(dirname(__file__)), 'timerBeep.wav')
    self.notify_process = play_wav(path)
    if self.stop_notify is False:
        if repeat > 0:
            # Schedule the next beep ~4 seconds out, counting repeat down.
            arw_time = arrow.now().replace(tzinfo='local')
            arw_time = arw_time.shift(seconds=4)
            self.notify_event_name = \
                'mycroftalarm.notify.repeat.{}'.format(repeat)
            self.schedule_event(lambda x=None: self.notify(repeat - 1),
                                arw_time.datetime,
                                data=self.notify_event_name,
                                name=self.notify_event_name)
        else:
            # Repeat budget exhausted.
            self.reset_notify()
    if self.stop_notify is True:
        self.reset_notify()
def execute(self, sentence):
    """Synthesize `sentence`, play it while driving the mouth visimes."""
    output = subprocess.check_output(self.args + ['-t', sentence])
    self.blink(0.5)
    player = play_wav(self.filename)
    # Animate the mouth from the synthesizer's visime output while the
    # audio plays, then wait for playback to finish.
    self.visime(output)
    player.communicate()
    self.blink(0.2)
def main():
    """Record audio (optionally muting ALSA chatter) and play it back."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', default=False,
        help="Add extra output regarding the recording")
    args = parser.parse_args()

    rule = " ==========================================================="
    print(rule)
    print(" == STARTING TO RECORD, MAKE SOME NOISE! ==")
    print(rule)
    if args.verbose:
        record(args.filename, args.duration)
    else:
        # Suppress low-level audio library output unless asked for.
        with mute_output():
            record(args.filename, args.duration)
    print(rule)
    print(" == DONE RECORDING, PLAYING BACK... ==")
    print(rule)
    status = play_wav(args.filename).wait()
    if status:
        print('An error occured while playing back audio ({})'.format(status))
def check_replay(self, message): print("inside check_replay") # Check if there is an expired timer now = datetime.now() if self.stopped == False: # Calc remaining time and show using faceplate if (self.songTimer["expires"] > now): if self.stopped == False: # Timer still running remaining = (self.songTimer["expires"] - now).seconds print(remaining) self.cancel_scheduled_event('Replay') self.schedule_repeating_event(self.check_replay, None, 1, name='Replay') else: # Timer has expired but not been cleared, flash eyes overtime = (now - self.songTimer["expires"]).seconds print(overtime) self.cancel_scheduled_event('Replay') sound_file = self.songTimer["file"] self.process = play_wav(sound_file) self.songTimer = { "file": sound_file, "expires": now + timedelta(seconds=self.audio_length) } self.schedule_repeating_event(self.check_replay, None, 1, name='Replay') else: self.cancel_scheduled_event('Replay') self.stop()
def main():
    """CLI audio test: optionally list input devices, then record and play back.

    Reads the listener configuration to report which input device and
    sample rate will be used, records for the requested duration, then
    plays the recording back.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', default=False,
        help="Add extra output regarding the recording")
    parser.add_argument(
        '-l', '--list', dest='show_devices', action='store_true',
        default=False, help="List all availabile input devices")
    args = parser.parse_args()

    if args.show_devices:
        # Enumerate every pyaudio device that can serve as an input.
        print(" Initializing... ")
        pa = pyaudio.PyAudio()
        print(" ====================== Audio Devices ======================")
        print(" Index Device Name")
        for device_index in range(pa.get_device_count()):
            dev = pa.get_device_info_by_index(device_index)
            if dev['maxInputChannels'] > 0:
                print(' {}: {}'.format(device_index, dev['name']))
        print()

    config = Configuration.get()
    # Prefer an explicitly named device, then an index, then the default.
    if "device_name" in config["listener"]:
        dev = config["listener"]["device_name"]
    elif "device_index" in config["listener"]:
        dev = "Device at index {}".format(config["listener"]["device_index"])
    else:
        dev = "Default device"
    samplerate = config["listener"]["sample_rate"]
    play_cmd = config["play_wav_cmdline"].replace("%1", "WAV_FILE")
    print(" ========================== Info ===========================")
    print(" Input device: {} @ Sample rate: {} Hz".format(dev, samplerate))
    print(" Playback commandline: {}".format(play_cmd))
    print()
    print(" ===========================================================")
    print(" == STARTING TO RECORD, MAKE SOME NOISE! ==")
    print(" ===========================================================")
    if not args.verbose:
        # Suppress low-level audio library output unless asked for.
        with mute_output():
            record(args.filename, args.duration)
    else:
        record(args.filename, args.duration)
    print(" ===========================================================")
    print(" == DONE RECORDING, PLAYING BACK... ==")
    print(" ===========================================================")
    status = play_wav(args.filename).wait()
    if status:
        print('An error occured while playing back audio ({})'.format(status))
def _play(self, message):
    """ Implementation specific async method to handle playback.

        This allows mpg123 service to use the "next method as well
        as basic play/stop.
    """
    LOG.info('SimpleAudioService._play')
    repeat = message.data.get('repeat', False)
    self._is_playing = True
    if isinstance(self.tracks[self.index], list):
        # Track entry is (uri, mime); split mime into type/subtype.
        track = self.tracks[self.index][0]
        mime = self.tracks[self.index][1]
        mime = mime.split('/')
    else:  # Assume string
        track = self.tracks[self.index]
        mime = find_mime(track)
    # Indicate to audio service which track is being played
    if self._track_start_callback:
        self._track_start_callback(track)
    # Replace file:// uri's with normal paths
    track = track.replace('file://', '')
    try:
        # Choose the player by mime subtype.
        if 'mpeg' in mime[1]:
            self.process = play_mp3(track)
        elif 'ogg' in mime[1]:
            self.process = play_ogg(track)
        elif 'wav' in mime[1]:
            self.process = play_wav(track)
        else:  # If no mime info could be determined guess mp3
            self.process = play_mp3(track)
    except FileNotFoundError as e:
        LOG.error('Couldn\'t play audio, {}'.format(repr(e)))
        self.process = None

    # Wait for completion or stop request
    while (self.process and self.process.poll() is None and
            not self._stop_signal):
        sleep(0.25)

    if self._stop_signal:
        # Stop was requested mid-track: kill the player and bail out.
        self.process.terminate()
        self.process = None
        self._is_playing = False
        return
    self.index += 1
    # if there are more tracks available play next
    if self.index < len(self.tracks) or repeat:
        if self.index >= len(self.tracks):
            # Wrap around when repeating the playlist.
            self.index = 0
        self.bus.emit(Message('SimpleAudioServicePlay',
                              {'repeat': repeat}))
    else:
        self._is_playing = False
def main():
    """Record a test clip from the microphone, then play it back."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    args = parser.parse_args()

    def banner(text):
        # Frame a status line between two rules, as the original printed.
        print(" ===========================================================")
        print(text)
        print(" ===========================================================")

    banner(" == STARTING TO RECORD, MAKE SOME NOISE! ==")
    record(args.filename, args.duration)
    banner(" == DONE RECORDING, PLAYING BACK... ==")
    play_wav(args.filename)
def notify(self, timestamp):
    """Sound the alarm registered for `timestamp`, escalating volume.

    Loops the alarm sound until stopped; once the first delay window
    elapses, the previous mixer volume is saved, volume is forced to
    100%% and an extended window starts. The saved volume is restored
    when the alarm ends.
    """
    with self.LOCK:
        if self.data.__contains__(timestamp):
            volume = None  # previous mixer volume, captured on escalation
            self.alarm_on = True
            delay = self.__calculate_delay(self.max_delay)
            while self.alarm_on and datetime.now() < delay:
                play_wav(self.file_path)
                time.sleep(1)
                self.speak_dialog('alarm.stop')
                time.sleep(self.repeat_time + 2)
                # 'is None' (not truthiness) so a saved volume of 0 does
                # not cause the escalation to re-trigger every pass.
                if volume is None and datetime.now() >= delay:
                    mixer = Mixer()
                    volume = mixer.getvolume()[0]
                    mixer.setvolume(100)
                    delay = self.__calculate_delay(self.extended_delay)
            # 'is not None' so a previous volume of 0 is still restored;
            # the old 'if volume:' check silently skipped it.
            if volume is not None:
                Mixer().setvolume(volume)
            self.remove(timestamp)
            self.alarm_on = False
            self.save()
def run(self):
    """ Thread main loop. get audio and visime data from queue and play.
    """
    while not self._terminated:
        try:
            snd_type, data, visimes, ident = self.queue.get(timeout=2)
            self.blink(0.5)
            if not self._processing_queue:
                # First item of a new batch: tell TTS audio is starting.
                self._processing_queue = True
                self.tts.begin_audio()

            stopwatch = Stopwatch()
            with stopwatch:
                if snd_type == 'wav':
                    self.p = play_wav(data)
                elif snd_type == 'mp3':
                    self.p = play_mp3(data)

                if visimes:
                    # show_visimes() returns True when playback was
                    # aborted; drop any queued audio in that case.
                    if self.show_visimes(visimes):
                        self.clear_queue()
                else:
                    self.p.communicate()
                self.p.wait()
            send_playback_metric(stopwatch, ident)

            if self.queue.empty():
                # Batch finished: notify TTS and reset per-batch state.
                self.tts.end_audio()
                self._processing_queue = False
                self._clear_visimes = False
            self.blink(0.2)
        except Empty:
            # No work within the timeout; loop to re-check _terminated.
            pass
        except Exception as e:
            LOG.exception(e)
            if self._processing_queue:
                self.tts.end_audio()
                self._processing_queue = False
def execute(self, sentence):
    """Synthesize `sentence` with Google TTS and play the saved audio.

    NOTE(review): gTTS saves MP3-encoded audio, yet the file is handed to
    play_wav() -- confirm the configured player tolerates MP3 data.
    """
    tts = gTTS(text=sentence, lang=self.lang)
    tts.save(self.filename)
    play_wav(self.filename)
def process(self, data):
    """Handle a raw serial-bus command string from the Mark-1 faceplate.

    Commands are matched by substring; most are translated into
    messagebus events, a few perform system-level actions directly.
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.ws.emit(Message(data))
    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.ws.emit(Message("enclosure.started"))
    if "mycroft.stop" in data:
        if has_been_paired():
            create_signal('buttonPress')
            self.ws.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.ws.emit(Message("mycroft.volume.increase",
                             {'play_sound': True}))
    if "volume.down" in data:
        self.ws.emit(Message("mycroft.volume.decrease",
                             {'play_sound': True}))
    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck volume, speak a prompt, record 3 s, play back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        # Eyes to soft gray on shutdown
        self.ws.emit(Message("enclosure.eyes.color",
                             {'r': 70, 'g': 65, 'b': 69}))
        self.ws.emit(
            Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.ws.emit(Message("system.shutdown"))
    if "unit.reboot" in data:
        # Eyes to soft gray on reboot
        self.ws.emit(Message("enclosure.eyes.color",
                             {'r': 70, 'g': 65, 'b': 69}))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.ws.emit(Message("system.reboot"))
    if "unit.setwifi" in data:
        self.ws.emit(Message("system.wifi.setup", {'lang': self.lang}))
    if "unit.factory-reset" in data:
        # Announce, wipe the pairing identity, reset wifi/ssh, reboot.
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("system.wifi.reset"))
        self.ws.emit(Message("system.ssh.disable"))
        wait_while_speaking()
        self.ws.emit(Message("enclosure.mouth.reset"))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(5)  # give the system time to process all messages
        self.ws.emit(Message("system.reboot"))
    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("system.ssh.enable"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))
    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("system.ssh.disable"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))
    if "unit.enable-learning" in data or "unit.disable-learning" in data:
        # Toggle the opt_in flag in the user's local configuration.
        enable = 'enable' in data
        word = 'enabled' if enable else 'disabled'
        LOG.info("Setting opt_in to: " + word)
        new_config = {'opt_in': enable}
        user_config = LocalConf(USER_CONFIG)
        user_config.merge(new_config)
        user_config.store()
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("learning " + word)}))
def process(self, data):
    """Dispatch raw serial-bus strings to websocket bus messages.

    Commands are matched by substring; several branches shell out to
    system commands (shutdown, reboot, factory reset).
    """
    # Forward the raw data verbatim for any other interested listeners.
    self.ws.emit(Message(data))
    if "Command: system.version" in data:
        self.ws.emit(Message("enclosure.start"))
    if "mycroft.stop" in data:
        create_signal('buttonPress')  # FIXME - Must use WS instead
        self.ws.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.ws.emit(
            Message("IncreaseVolumeIntent", {'play_sound': True}))
    if "volume.down" in data:
        self.ws.emit(
            Message("DecreaseVolumeIntent", {'play_sound': True}))
    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic test: duck volume, speak a prompt, record 3 s, play back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()
        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        self.ws.emit(
            Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)
    if "unit.reboot" in data:
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))
    if "unit.factory-reset" in data:
        # Wipe the pairing identity so the unit re-pairs after reboot.
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def handle_play(self, message):
    """Begin playing the configured sound file.

    Starts ``play_wav`` on ``self.file_path`` and stores the spawned
    process handle on the instance so playback can be managed later.

    Args:
        message: bus message that triggered playback (unused)
    """
    playback = play_wav(self.file_path)
    self.play_process = playback
def process(self, data):
    """Handle one raw command string read from the enclosure serial bus.

    Scans the raw text for known command tokens and, for each match,
    emits the corresponding websocket bus message(s) and/or runs the
    matching system action (shutdown, reboot, factory reset, ...).

    Args:
        data (str): raw text received from the serial connection
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.ws.emit(Message(data))
    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.ws.emit(Message("enclosure.started"))
    if "mycroft.stop" in data:
        # Ignore the stop button until the device has been paired.
        if has_been_paired():
            create_signal('buttonPress')
            self.ws.emit(Message("mycroft.stop"))
    if "volume.up" in data:
        self.ws.emit(
            Message("VolumeSkill:IncreaseVolumeIntent",
                    {'play_sound': True}))
    if "volume.down" in data:
        self.ws.emit(
            Message("VolumeSkill:DecreaseVolumeIntent",
                    {'play_sound': True}))
    if "system.test.begin" in data:
        # Put the recognizer loop to sleep while hardware tests run.
        self.ws.emit(Message('recognizer_loop:sleep'))
    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))
    if "mic.test" in data:
        # Mic check: record a short sample at reduced volume, play it
        # back, then exercise the mute path.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))
        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)  # restore the user's previous volume
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)
    if "unit.shutdown" in data:
        self.ws.emit(
            Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)
    if "unit.reboot" in data:
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))
    if "unit.factory-reset" in data:
        # Delete the device identity, reset wifi/ssh state, announce the
        # reset, then reboot; the unit must be paired again on next boot.
        self.ws.emit(Message("enclosure.eyes.spin"))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("mycroft.wifi.reset"))
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        wait_while_speaking()
        self.ws.emit(Message("enclosure.mouth.reset"))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.enable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))
    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))