def _in():
    """Listen for recognized text and dispatch matching mind commands.

    Generator: boots the default mind, then for each non-empty input
    string looks the uppercased text up in the active mind's keyword
    map (``mind.kws``) and yields the text after handling it.

    Commands come in two forms:
      * callables (stubs) — invoked via ``call_function``;
      * strings — shell command lines run via ``sys_exec``.

    Raises:
        Exception: when the input matches no known command.
    """
    default_mind = 'boot'
    load_minds()
    set_mind(default_mind)
    logging.debug('"{}" is now listening. Say "Boot Mind!" to see if it can hear you.'.format(default_mind))

    while not oa.core.finished.is_set():
        text = get()
        logging.info('Input: {}'.format(text))
        mind = oa.core.mind

        if (text is None) or (text.strip() == ''):
            # Nothing to do.
            continue

        # Strip before uppercasing so stray whitespace from the
        # recognizer cannot prevent a keyword match (the emptiness
        # check above already treats whitespace-only input as empty).
        t = text.strip().upper()

        # Check for a matching command.
        fn = mind.kws.get(t, None)
        if fn is None:
            # Any unknown command raises an exception.
            raise Exception("Unable to process: {}".format(text))

        # There are two types of commands, stubs and command line text.
        if isCallable(fn):
            # For stubs, call `perform()`.
            call_function(fn)
            oa.last_command = t
        elif isinstance(fn, str):
            # For strings, call `sys_exec()`.
            sys_exec(fn)
            oa.last_command = t

        yield text
def _in():
    """Consume raw audio frames and yield recognized command phrases.

    Generator: reads items from the input queue. String items are
    control messages ('mute' / 'unmute') toggling whether audio is
    processed; anything else is treated as raw audio, fed through the
    active pocketsphinx decoder, and the hypothesis is yielded when it
    matches one of the decoder's known phrases.
    """
    muted = False
    while not oa.core.finished.is_set():
        raw_data = get()

        # Control messages arrive as plain strings on the same queue.
        if isinstance(raw_data, str):
            if raw_data == 'mute':
                logging.debug('Muted')
                muted = True
            elif raw_data == 'unmute':
                logging.debug('Unmuted')
                muted = False
                # Give playback a moment to die down, then discard any
                # audio captured while we were muted.
                time.sleep(.9)
                empty()
            continue

        # Mute mode. Do not listen until unmute.
        if muted:
            continue

        # Obtain audio data.
        # XXX: race condition when mind isn't set yet
        dinf = get_decoder()
        decoder = dinf.decoder

        decoder.start_utt()  # Begin utterance processing.
        try:
            # Process audio with recognition enabled (no_search=False)
            # in streaming mode (full_utt=False) — the previous comment
            # wrongly claimed full_utt was True.
            decoder.process_raw(raw_data, False, False)
        except Exception as e:
            logging.error(e)
        decoder.end_utt()  # Stop utterance processing.

        hypothesis = decoder.hyp()
        if hypothesis is None:
            # logging.warn is a deprecated alias; use warning().
            logging.warning('Speech not recognized')
            continue

        hyp = hypothesis.hypstr
        if (hyp is None) or (hyp.strip() == ''):
            continue

        logging.info("Heard: {}".format(hyp))
        if hyp.upper() in dinf.phrases:
            yield hyp
def _in():
    """Speak each queued message aloud, muting recognition while talking.

    On macOS the system `say` command is used (fed via `echo`); on
    other platforms a pyttsx3 engine is created once and reused. The
    speech recognizer is muted before speaking and unmuted afterwards
    so the assistant does not hear its own voice.
    """
    if not flMac:
        tts = pyttsx3.init()

    while not oa.core.finished.is_set():
        message = get()

        # Pause Ear (listening) while talking. Mute TTS.
        put('speech_recognition', 'mute')

        if flMac:
            echo_proc = subprocess.Popen(['echo', message], stdout=subprocess.PIPE)
            say_proc = subprocess.Popen(['say'], stdin=echo_proc.stdout)
            echo_proc.stdout.close()
            say_proc.communicate()
        else:
            tts.say(message)
            # Block until the engine has finished speaking.
            tts.runAndWait()

        # Continue ear (listening). Unmute TTS.
        put('speech_recognition', 'unmute')
def _in():
    """Echo queued messages to stdout.

    Generator: prints each item taken from the queue and yields an
    empty string per item.
    """
    while not oa.core.finished.is_set():
        message = get()
        print(message)
        yield ''
def _in():
    """Forward each queued message to the debug log."""
    while not oa.core.finished.is_set():
        logging.debug(get())