def handleStartBtn():
    # Read the form fields; `ents` is the module-level list of
    # (label, Entry) pairs built by the form code.
    pwd1 = ents[0][1].get().strip()
    uname1 = ents[1][1].get().strip()

    # Entries 2-11 hold optional target usernames; keep only non-empty ones.
    userNames = [ent.get().strip() for _, ent in ents[2:12] if ent.get().strip()]

    time1 = ents[12][1].get().strip()
    proxy = ents[13][1].get().strip()
    file = ents[14][1].get().strip()

    # Start the assistant only when both credentials are present.
    if uname1 and pwd1:
        assistant = Assistant()
        assistant.initializeAssistant(uname1, pwd1, userNames, time1, proxy, file)
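# --- Illustrative sketch (not from the original source) ---
# handleStartBtn above indexes into a module-level `ents` list. A minimal
# way to build such a list with Tkinter is sketched below, assuming each
# element is a (label, Entry) pair; the field labels are guesses inferred
# from the indices handleStartBtn uses, not confirmed by the source.
import tkinter as tk

FORM_FIELDS = (["Password", "Username"]
               + ["Target user {}".format(i) for i in range(1, 11)]
               + ["Interval", "Proxy", "File"])

def make_form(root, fields=FORM_FIELDS):
    ents = []
    for field in fields:
        row = tk.Frame(root)
        tk.Label(row, width=15, text=field, anchor="w").pack(side=tk.LEFT)
        entry = tk.Entry(row)
        entry.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
        row.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
        ents.append((field, entry))
    return ents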
def main():
    model_path = get_model_path()

    # Parse command-line options; `Config` loads the mind configuration,
    # and command-line arguments override the config file.
    args = _parser(sys.argv[1:])
    logger.debug("Arguments: {args}".format(args=args))
    conf = Config(path=args.mind_dir, **vars(args))

    #
    # Further patching to ease transition..
    #

    # Configure Language
    logger.debug("Configuring Module: Language")
    conf.strings_file = os.path.join(conf.cache_dir, "sentences.corpus")
    conf.dic_file = os.path.join(conf.cache_dir, 'dic')
    conf.lang_file = os.path.join(conf.cache_dir, 'lm')
    conf.fsg_file = None  # os.path.join(conf.cache_dir, 'fsg')
    # sphinx_jsgf2fsg < conf.jsgf_file > conf.fsg_file

    updater = LanguageUpdater(conf)
    updater.update_language()

    recognizer = LiveSpeech(verbose=False,
                            sampling_rate=16000,
                            buffer_size=2048,
                            no_search=False,
                            full_utt=False,
                            hmm=os.path.join(model_path, 'en-us'),
                            lm=conf.lang_file,
                            dic=conf.dic_file)

    # A configured Assistant
    a = Assistant(config=conf)

    # LiveSpeech yields one hypothesis per detected utterance.
    for phrase in recognizer:
        recognizer_finished(a, recognizer, phrase.hypothesis())
conf.fsg_file = None  # os.path.join(conf.cache_dir, 'fsg')
# sphinx_jsgf2fsg < conf.jsgf_file > conf.fsg_file

updater = LanguageUpdater(conf)
updater.update_language()

# Configure Recognizer
logger.debug("Configuring Module: Speech Recognition")
recognizer = Recognizer(conf)

#
# End patching
#

# A configured Assistant
a = Assistant(config=conf)
recognizer.connect('finished',
                   lambda rec, txt, agent=a: recognizer_finished(agent, rec, txt))

#
# Questionable dependencies
#

# Initialize GObject Threads
GObject.threads_init()

# Create Main Loop
main_loop = GObject.MainLoop()

# Handle Signal Interrupts
signal.signal(signal.SIGINT, signal.SIG_DFL)
# oa.py - Core Open Assistant Loop

import signal
import sys

from gi.repository import GObject

from core import Assistant

if __name__ == '__main__':
    # Create Assistant
    a = Assistant()

    # Create Main Loop
    main_loop = GObject.MainLoop()

    # Handle Signal Interrupts
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Run Assistant
    # a.setup_mic()
    a.run()

    # Start Main Loop, shutting down cleanly on interrupt
    try:
        main_loop.run()
    except KeyboardInterrupt:
        main_loop.quit()
        sys.exit()
import sys

from core import Assistant

assistant = Assistant()

# Register simple functions with the assistant. Arguments are captured
# from the input text, so numeric parameters are converted explicitly.
@assistant.register("add {n1} and {n2}")
def add(n1, n2):
    return int(n1) + int(n2)

@assistant.register("say {word} {n} times")
def say(word, n):
    for _ in range(int(n)):
        print(word)

# Execute whatever command was passed on the command line
print(assistant.execute(sys.argv[1]))
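# Example invocation, assuming this script is saved as cli.py (the name is
# illustrative; the real filename is not shown in the source):
#
#   $ python cli.py "add 3 and 4"
#   7
#
# The exact output depends on what Assistant.execute returns.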
from core import Assistant, arg_match

# Simple tests for the argument matcher
print("Should be true", arg_match("add 3 and 4", "add {n1} and {n2}"))
print("Should be false", arg_match("add 3 and 4", "add {n1} and 43"))

assistant = Assistant()

# Register a simple function with the assistant
@assistant.register("add {n1} and {n2}")
def add(n1, n2):
    return int(n1) + int(n2)

# Attempt to execute on input
print(assistant.execute("add 42 and 54"))
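# --- Illustrative sketch (not from the original source) ---
# The tests above suggest arg_match treats {name} placeholders as
# single-token wildcards. A minimal regex-based version with those assumed
# semantics might look like this; the real core.arg_match may differ.
import re

def arg_match_sketch(text, pattern):
    """Return a dict of captured arguments, or None if text doesn't match."""
    # Escape regex metacharacters, then turn the escaped \{name\}
    # placeholders into named groups matching one whitespace-free token.
    regex = re.escape(pattern)
    regex = re.sub(r"\\\{(\w+)\\\}", r"(?P<\1>\\S+)", regex)
    m = re.fullmatch(regex, text)
    return m.groupdict() if m else None

# Mirrors the two tests above:
#   arg_match_sketch("add 3 and 4", "add {n1} and {n2}") -> {'n1': '3', 'n2': '4'}
#   arg_match_sketch("add 3 and 4", "add {n1} and 43")   -> None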