def doTestNumberRecognition(self, words, numberResult):
    try:
        natlink.recognitionMimic(words)
    except natlink.MimicFailed:
        print 'failed to recognise %s as testnumber' % words
        return
    return 1  # success
def click(self, clickType='leftclick', x=None, y=None, appName='iphoneWin'):
    # get the equivalent event code of the type of mouse event to perform
    # leftclick, rightclick, rightdouble-click (see kmap)
    event = self.kmap[clickType]
    # play events down click and then release (for left double click,
    # increment from the left button up event, which produces no action;
    # when incremented, it performs the double-click)
    # if coordinates are not supplied, just click
    if getattr(event, 'conjugate'):
        if not (x or y):
            x, y = natlink.getCursorPos()
        # apply vertical offset dependent on presence of "personal hotspot"
        # bar across the top of the screen
        y += self.windows.appDict[appName].vert_offset
        logging.debug('clicking at: %d, %d' % (x, y))
        natlink.playEvents([(wm_mousemove, x, y),
                            (event, x, y),
                            (event + 1, x, y)])
    else:
        logging.error('incorrect click look up for the event %s' % str(clickType))
        # default to natlink.recognitionMimic(['mouse', 'click'])
def ListDialog(self, id):
    if not self.Busy:
        # now handled by list dialog:
        self.ShowWindow(win32con.SW_RESTORE)
        self.Busy = 1
        (List, Titles, defer, size) = listdialogs.GetDumpedData(RequestFileName)
        dlg = listdialogs.MultiListDialog(Titles[0], List, colHeadings=Titles[1:],
                                          size=size, resize=1)
        grammar = SelectGrammar()
        grammar.initialize(dlg)
        r = dlg.DoModal()
        grammar.terminate()
        if r == win32con.IDOK:
            SetMic('off')
            listdialogs.DumpData((defer[1], dlg.Selection), ResultFileName)
        self.Busy = 0
        self.ShowWindow(win32con.SW_MINIMIZE)
        if r == win32con.IDOK:
            try:
                natlink.recognitionMimic(defer[0])
            except:
                pass
def recognitionMimic(mimicList):
    """for Dragon 12, try execScript HeardWord
    """
    if DNSVersion >= 12:
        script = 'HeardWord "%s"' % '", "'.join(mimicList)
        natlink.execScript(script)
    else:
        natlink.recognitionMimic(mimicList)
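# --- A minimal usage sketch for the HeardWord wrapper above (an illustration,
# not part of the original sources). It assumes natlink is already connected
# to a running Dragon instance and that DNSVersion is the module-level
# constant the wrapper reads. On Dragon 12+ the first call becomes
# natlink.execScript('HeardWord "switch", "to", "command", "mode"'); on older
# versions it falls through to natlink.recognitionMimic directly.
recognitionMimic(["switch", "to", "command", "mode"])
recognitionMimic(["go", "to", "sleep"])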
def gotResults_start(self, words, fullResults):
    global lastResult
    if len(words) > 2:
        count = int(words[2])
    else:
        count = 1
    if lastResult:
        for i in range(count):
            natlink.recognitionMimic(lastResult)
def doTestRecognition(self, words, shouldWork=1, log=None):
    if shouldWork:
        natlink.recognitionMimic(words)
        if log:
            self.log("recognised: %s" % words)
    else:
        self.doTestForException(natlink.MimicFailed, "natlink.recognitionMimic(words)", locals())
        if log:
            self.log("did not recognise (as expected): %s" % words)
def DeferredSelectFromListDialog(List, Titles, defer, size=defaultSize):
    import natlink
    DumpData((List, Titles, defer, size), RequestFileName)
    SetMic('off')
    AppBringUp('Server', Exec=PythonServerExe, Args='/app DialogServer.py /listdialog')
    time.sleep(2)
    # try:
    #     print 3  # does no longer generate an error when Serverapp not running? DNS 5?
    natlink.recognitionMimic(['Request', 'List', 'Dialog'])
def gotResults_checkalphabet(self, words, fullResults):
    """check the exact spoken versions of the alphabet in spokenforms
    """
    version = unimacroutils.getDNSVersion()
    _spok = spokenforms.SpokenForms(self.language, version)
    alph = 'alphabet'
    ini = spokenforms.ini
    for letter in string.ascii_lowercase:
        spoken = ini.get(alph, letter, '')
        if not spoken:
            print('fill in in "%s_spokenform.ini", [alphabet] spoken for: "%s"' %
                  (self.language, letter))
            continue
        if version < 11:
            normalform = '%s\\%s' % (letter.upper(), spoken)
        else:
            normalform = '%s\\letter\\%s' % (letter.upper(), spoken)
        try:
            natlink.recognitionMimic([normalform])
        except natlink.MimicFailed:
            print('invalid spoken form "%s" for "%s"' % (spoken, letter))
            if spoken == spoken.lower():
                spoken = spoken.capitalize()
                trying = 'try capitalized variant'
            elif spoken == spoken.capitalize():
                spoken = spoken.lower()
                trying = 'try lowercase variant'
            else:
                continue
            if version < 11:
                normalform = '%s\\%s' % (letter.upper(), spoken)
            else:
                normalform = '%s\\letter\\%s' % (letter.upper(), spoken)
            try:
                natlink.recognitionMimic([normalform])
            except natlink.MimicFailed:
                print('%s fails also: "%s" for "%s"' % (trying, spoken, letter))
            else:
                print('alphabet section is corrected with: "%s = %s"' % (letter, spoken))
                ini.set(alph, letter, spoken)
                ini.writeIfChanged()
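# --- A short sketch of the letter word-form convention used by the two
# checkalphabet examples (an assumption drawn from those snippets, not from
# natlink documentation): Dragon 11 and later expect 'X\letter\spoken', older
# versions 'X\spoken'. The helper below mimics a single letter and reports
# whether Dragon accepted the spoken form; the function name and parameters
# are illustrative only.
import natlink

def mimic_letter(letter, spoken, dns_version):
    if dns_version >= 11:
        word = '%s\\letter\\%s' % (letter.upper(), spoken)
    else:
        word = '%s\\%s' % (letter.upper(), spoken)
    try:
        natlink.recognitionMimic([word])
        return True
    except natlink.MimicFailed:
        return False

# example call: mimic_letter('a', 'alpha', dns_version=15)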
def gotResults_nextfield(self, words, fullResults):
    """special commands"""
    dct = self.dictobj
    if dct.app and dct.commandnextfield:
        prevFocus = dct.acquireFocus()
        dct.activateMenuItem("commandnextfield")
        # this is a test. dct.updateState()
        dct.looseFocus(prevFocus)
        hndle = win32gui.GetForegroundWindow()
        if hndle == dct.app:
            print('in target application')
            self.wait(1)
            dct.sendKey("{backspace}")
            self.wait()
            natlink.recognitionMimic(["\\Cap"])
            # dct.selectField()
    else:
        D("Not a valid menu command in app %s: %s" % (dct.app, dct.commandnextfield))
def repeatNow(self):
    debugPrint('repeatNow: %s times, %s phrases' % (self.nTimes, self.nPhrase))
    # put repeat mode ON:
    self.repeatFlag = 1
    itemrange = range(len(self.lastResults))
    if not itemrange:
        self.repeatFlag = 0
        return None
    itemrange = itemrange[-self.nPhrase:]
    # print 'na itemrange:', itemrange
    for n in range(self.nTimes):
        for i in itemrange:
            if not self.repeatFlag:
                return None  # if something else happened
            self.repeatStuff = self.lastResults[i]
            natlink.recognitionMimic(self.repeatStuff)
    # reset repeat mode:
    self.repeatFlag = 0
    self.repeatStuff = None
def gotResults_checkalphabet(self, words, fullResults):
    """check the exact spoken versions of the alphabet in spokenforms
    """
    import spokenforms
    version = natqh.getDNSVersion()
    spok = spokenforms.SpokenForms(self.language, version)
    alph = 'alphabet'
    ini = spokenforms.ini
    for letter in string.ascii_lowercase:
        spoken = ini.get(alph, letter, '')
        if not spoken:
            print 'fill in in "%s_spokenform.ini", [alphabet] spoken for: "%s"' % (self.language, letter)
            continue
        if version < 11:
            normalform = '%s\\%s' % (letter.upper(), spoken)
        else:
            normalform = '%s\\letter\\%s' % (letter.upper(), spoken)
        try:
            natlink.recognitionMimic([normalform])
        except natlink.MimicFailed:
            print 'invalid spoken form "%s" for "%s"' % (spoken, letter)
            if spoken == spoken.lower():
                spoken = spoken.capitalize()
                trying = 'try capitalized variant'
            elif spoken == spoken.capitalize():
                spoken = spoken.lower()
                trying = 'try lowercase variant'
            else:
                continue
            if version < 11:
                normalform = '%s\\%s' % (letter.upper(), spoken)
            else:
                normalform = '%s\\letter\\%s' % (letter.upper(), spoken)
            try:
                natlink.recognitionMimic([normalform])
            except natlink.MimicFailed:
                print '%s fails also: "%s" for "%s"' % (trying, spoken, letter)
            else:
                print 'alphabet section is corrected with: "%s = %s"' % (letter, spoken)
                ini.set(alph, letter, spoken)
                ini.writeIfChanged()
class DragonRule(MappingRule):
    mapping = {
        "(go-to-sleep | snore | mic-sleep)":
            Function(lambda: natlink.setMicState("sleeping")),
        "(lock-Dragon|turn-mic-off)":
            Function(lambda: natlink.setMicState("off")),
        "german profile":
            Function(lambda: natlink.saveUser() + natlink.openUser("Codebold german")),
        "englisches Profil":
            Function(lambda: natlink.saveUser() + natlink.openUser("Codebold")),
        "reload grammar[s]": Function(lambda: updateAllGrammars()),
        "reload <grammar>": GrammarUpdate(),
        "como": Function(lambda: natlink.recognitionMimic(["switch", "to", "command", "mode"])),
        "diemo": Function(lambda: natlink.recognitionMimic(["start", "dictation", "mode"])),
        "nomo": Function(lambda: natlink.recognitionMimic(["normal", "mode", "on"])),
        "sleemo": Function(lambda: natlink.recognitionMimic(["go", "to", "sleep"])),
        "dictation": Function(lambda: natlink.recognitionMimic(["show", "dictation", "box"])),
        "dictox":
            Function(lambda: natlink.recognitionMimic(["normal", "mode", "on"])) +
            Function(lambda: natlink.recognitionMimic(["show", "dictation", "box"])),
        "transfox":
            Function(lambda: natlink.recognitionMimic(["click", "transfer"])) +
            Function(lambda: natlink.recognitionMimic(["command", "mode", "on"])),
        "blitz NatLink": Function(blitz_natlink_status),
        "show NatLink": Function(show_natlink_status)
    }
    extras = [chc_base.grammar]
class WindowsRule(MappingRule):
    mapping = {
        # "closapp": K("win:down, a-f4, win:up")  # switch
        # "closapp": K("alt:down, f4, alt:up")
        # "closapp": K("a-f4")
        "closapp": Function(lambda: natlink.recognitionMimic(["close", "window"])),
        # "swatch": K("alt:down") + P("20") + K("tab") + P("20") + K("alt:up")  # switch app
        # "swatch": K("win:down, a-tab, win:up")  # switch tab
        "swatch": StartApp("explorer.exe", chc_base.dir_bin + "window-switch.lnk"),
        "swatcha": StartApp("explorer.exe", chc_base.dir_bin + "window-switch.lnk") + K("enter"),
        "swap": K("c-tab"),  # switch tab
        "putty <host_name>": DynStartApp(chc_base.exe_putty, "-load", "%(host_name)s"),
        "sound settings": WinCmd(r"C:\Windows\System32\mmsys.cpl")
    }
    extras = [chc_base.host_name]
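# --- A minimal sketch of how MappingRule classes like the two above are
# typically activated with dragonfly (an illustration, not part of the
# original sources; only the rule names are taken from the snippets above).
# It assumes dragonfly is installed and running under the natlink loader.
from dragonfly import Grammar

grammar = Grammar("natlink mimic examples")
grammar.add_rule(DragonRule())
grammar.add_rule(WindowsRule())
grammar.load()

def unload():
    # called by the command-module loader when this file is unloaded
    global grammar
    if grammar:
        grammar.unload()
    grammar = None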
def doTestNumbersRecognition(words, expected, info):
    natlink.recognitionMimic(words)
    testGram.checkExperiment(expected, info)
def winAction(self, actionKey='', appName='iphoneWin'): print "action" # concatenate actionKey if getattr(actionKey, 'insert'): actionKey = ' '.join(actionKey) print( str(self.__module__) + "debug: action Key of command concatenated: %s" % actionKey) # assuming the correct window is in focus # wake. Recognition mimic doesn't seem to be a good model. Something to # do with speed ofplayback etc. Grammar not always recognised as a # command. natlink.playString('{space}', 0x00) app = self.windows.appDict[str(appName)] gramList = [] if str(actionKey) in app.mimicCmds: # we want to get out of grid mode aftermouse positioning # special cases first. if str(actionKey) == 'home': natlink.recognitionMimic(['mouse', 'window']) natlink.recognitionMimic(['go']) self.click('rightclick', appName=appName) elif str(actionKey) == 'wake': natlink.recognitionMimic(['mouse', 'window']) natlink.recognitionMimic(['go']) self.click('rightclick', appName=appName) actionKey = 'drag right' natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print( str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.drag(dragDirection=actionKey.split()[1], dist=2) elif str(actionKey) == 'personal hotspot toggle': if app.vert_offset: app.vert_offset = 0 else: app.vert_offset = app.TOGGLE_VOFFSET print( str(self.__module__) + "debug: Toggled vertical offset, before: %d, after: %d" % (old, app.vert_offset)) elif str(actionKey).startswith("select"): pass # function continued in its own handler elif str(actionKey).startswith("show"): pass elif str(actionKey).startswith("drag"): natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print( str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.drag(dragDirection=actionKey.split()[1]) else: natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print( str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.click('leftclick', appName=appName) return 0 else: print(str(self.__module__) + 'error:unknown actionKey') return 1
def gotResults_choose(self, words, fullResults):
    self.nChoice = None
    hadChoose = 0  # track whether a correction ("Choose") was performed
    if self.hasCommon(words[-1], 'Medium'):
        self.nChoice = choiceMiddle
    elif self.hasCommon(words[-1], 'Strong'):
        self.nChoice = choiceStrong
    elif self.hasCommon(words[-1], 'Weak'):
        self.nChoice = choiceWeak
    if self.nChoice:
        del words[-1]
    else:
        self.nChoice = choiceMiddle
    if words[-1] in ChooseList:
        choice = int(words[-1])
        if not choice:
            print 'no valid choice given'
            natqh.Wait(0.2)
            self.cancelMode()
            natqh.returnFromMessagesWindow()
            return
        newWords = self.lastResObj.getWords(choice - 1)
        res = self.lastResObj.getResults(choice - 1)
        resCode = res[0][1]
        resCode = resCode & 0x7fffffff
        # formatting: ===========================================
        if 'Format' in words:
            if not DoFormatting:
                print 'formatting options invalid!'
                return
            if resCode:
                print 'no formatting can be done on a command!'
                time.sleep(1.5)
            elif len(newWords) > 1:
                print 'no formatting can be done on a list of words'
                time.sleep(1.5)
            else:
                self.newWord = newWords[0]
                fKeys = FORMATS.keys()
                fKeys.sort()
                fcKeys = FormatComments.keys()
                fcKeys.sort()
                if fKeys != fcKeys:
                    print 'keys of FORMATS and FormatComments do not match'
                    return
                numChoices = len(fKeys)
                if language == 'nld':
                    print 'Formatteren van: %s' % self.newWord
                    print 'Kies Format 1, ..., %i of zeg "Annuleren"' % numChoices
                elif language == 'enx':
                    print 'Formatting: %s' % self.newWord
                    print 'Choose Format 1, ..., %i, or say "Cancel"' % numChoices
                else:
                    print 'invalid language, skip this'
                    self.cancelMode()
                    return
                for n in range(numChoices):
                    print '%s:\t%s' % (n + 1, FormatComments[n + 1])
                # Entered the new exclusive grammar rules, for the right
                # format to be chosen
                self.oopsFlag = 3
                self.activateSet(['inoops2'], exclusive=1)
                return
        # deleting: ===========================================
        elif self.hasCommon(words, ['Delete', 'Verwijder']):
            if resCode:
                print 'no delete of a command!'
                time.sleep(1.5)
            elif len(newWords) > 1:
                print 'no delete on a list of words'
                time.sleep(1.5)
            else:
                natlink.deleteWord(newWords[0])
                print 'deleted: %s' % newWords[0]
        elif self.hasCommon(words, ['Properties', 'Eigenschappen']):
            if resCode:
                print 'no properties on a command!'
                time.sleep(1.0)
            elif len(newWords) > 1:
                print 'no properties of a list of words'
                time.sleep(1.0)
            else:
                self.newWord = newWords[0]
                props = natlink.getWordInfo(self.newWord)
                print 'properties of %s: %x' % (self.newWord, props)
                p = natqh.ListOfProperties(props)
                if p:
                    for pp in p:
                        print pp
                time.sleep(4.0)
        elif self.hasCommon(words, ['Choose', 'Kies', 'OK']):
            hadChoose = 1
            print 'correcting: %s (%s times)' % (newWords, self.nChoice)
            for i in range(self.nChoice):
                result = self.lastResObj.correction(newWords)
                if not result:
                    print 'correction failed'
                    break
            else:
                print 'corrected %s times' % self.nChoice
    else:
        print 'invalid word in command: %s' % repr(words)
        time.sleep(2.0)
    time.sleep(1.0)
    self.cancelMode()
    natqh.returnFromMessagesWindow()
    # Like in DragonDictate, when the word was not a command but a
    # dictate word, the last phrase is scratched and replaced by the new
    # text or the new command.
    if hadChoose and self.FirstIsDictate:
        print 'mimic first: %s' % ScratchThatCommand
        natlink.recognitionMimic(ScratchThatCommand)
        print 'now mimic: %s' % newWords
        natlink.recognitionMimic(newWords)
def winAction(self, actionKey='', appName='iphoneWin'): print "action" # concatenate actionKey if getattr(actionKey, 'insert'): actionKey = ' '.join(actionKey) print(str(self.__module__) + "debug: action Key of command concatenated: %s"% actionKey) # assuming the correct window is in focus # wake. Recognition mimic doesn't seem to be a good model. Something to # do with speed ofplayback etc. Grammar not always recognised as a # command. natlink.playString('{space}', 0x00) app = self.windows.appDict[str(appName)] gramList = [] if str(actionKey) in app.mimicCmds: # we want to get out of grid mode aftermouse positioning # special cases first. if str(actionKey) == 'home': natlink.recognitionMimic(['mouse', 'window']) natlink.recognitionMimic(['go']) self.click('rightclick',appName=appName) elif str(actionKey) == 'wake': natlink.recognitionMimic(['mouse', 'window']) natlink.recognitionMimic(['go']) self.click('rightclick',appName=appName) actionKey = 'drag right' natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print(str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.drag(dragDirection=actionKey.split()[1], dist=2) elif str(actionKey) == 'personal hotspot toggle': if app.vert_offset: app.vert_offset = 0 else: app.vert_offset = app.TOGGLE_VOFFSET print(str(self.__module__) + "debug: Toggled vertical offset, before: %d, after: %d"% (old, app.vert_offset)) elif str(actionKey).startswith("select"): pass # function continued in its own handler elif str(actionKey).startswith("show"): pass elif str(actionKey).startswith("drag"): natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print(str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.drag(dragDirection=actionKey.split()[1]) else: natlink.recognitionMimic(['mouse', 'window']) gramList = app.mimicCmds[actionKey] print(str(self.__module__) + "debug: Grammer list for action '{0}': {1}".format( actionKey, gramList)) natlink.recognitionMimic(gramList) natlink.recognitionMimic(['go']) self.click('leftclick',appName=appName) return 0 else: print(str(self.__module__) + 'error:unknown actionKey') return 1
def gotResults_abrvPhrase(self, words, fullResults):
    phrase = self.abrvMap[' '.join(words)]
    natlink.recognitionMimic(phrase.split())
def gotResults_test(self, words, fullResults):
    self.cancelMode()
    print('words in test', words)
    natlink.recognitionMimic(words)
def say(self, utterance, user_input=None, never_bypass_sr_recog=0,
        echo_utterance=0, echo_cmd=0):
    """Simulate an utterance *STR utterance*

    *STR utterance* -- The utterance. This can be a string with the
    written form of what should be recognised by the SR system. If it's
    a list, it should be a list of words in their written\spoken form
    (or just written if it doesn't have a spoken form different from
    its written form).

    In general, it's better to specify *utterance* as a list of
    written\spoken words because it allows to simulate exactly what the
    SR does (e.g. what if the SR recognises an LSA as a sequence of
    words instead of its written\spoken form?)

    *STR user_input* -- A string that will be sent to the mediator
    console's standard input. Use in automated regression testing, if
    the *say* command requires additional user input (e.g. confirmation
    of a symbol match).

    *BOOL echo_utterance=0* -- If true, echo the utterance on STDOUT.

    BOOL *never_bypass_sr_recog* -- If *TRUE*, the interpretation will
    always be done through NatLink's recognitionMimic function, even if
    the 'bypass' switch was on.

    Examples: say('x not equal to') -> 'x != '
              say(['x', ' != \\not equal to']) -> 'x != '
    """
    global sleep_before_recognitionMimic

    if self.should_exit:
        trace('SimCmdsObj.say', 'cancelling testing')
        raise mediator_exceptions.CancelTesting()

    if echo_cmd:
        self.echo_command('say', utterance, user_input,
                          never_bypass_sr_recog, echo_utterance)

    # print 'Saying: %s' % utterance
    sys.stdout.flush()
    if echo_utterance:
        print 'Saying: %s' % utterance

    if user_input:
        #
        # Create temporary user input file
        #
        old_stdin = sys.stdin
        temp_file = None
        if 0:
            temp_file_name = vc_globals.tmp + os.sep + 'user_input.dat'
            temp_file = open(temp_file_name, 'w')
            # print 'temp file opened for writing'
            sys.stdout.flush()
            temp_file.write(user_input)
            temp_file.close()
            temp_file = open(temp_file_name, 'r')
            # print 'temp file opened for reading'
            sys.stdin = temp_file
        else:
            sys.stdin = StringIO(user_input)
        sys.stdout.flush()

    try:
        if self.bypass_sr_recog and not never_bypass_sr_recog:
            trace('SimCmdsObj.say', 'bypassing NatSpeak')
            sys.stdout.flush()
            if util.islist(utterance) or util.istuple(utterance):
                spoken = self.utterance_spoken_forms(utterance)
            else:
                utterance = re.split('\s+', utterance)
                spoken = utterance
            print "Heard %s" % string.join(spoken)
            dictation_allowed = self.app.recog_begin(None)
            self.app.synchronize_with_app()
            buff_name = self.app.curr_buffer_name()
            active_field = self.app.active_field()
            dictation_allowed = dictation_allowed and (active_field == None)
            if self.testing and not dictation_allowed:
                trace('SimCmdsObj.say', 'cancelling testing')
                raise mediator_exceptions.CancelTesting()
            self.interp.interpret_NL_cmd(utterance, self.app)
            self.app.recog_end()
            self.show_buff()
        else:
            trace('SimCmdsObj.say', 'NOT bypassing NatSpeak')
            if util.islist(utterance) or util.istuple(utterance):
                words = []
                #
                # Clean up the written form in case user didn't type
                # special characters in the form that the SR expects
                # (e.g. '\n' instead of '{Enter}')
                #
                for a_word in utterance:
                    # Make sure word is in-vocabulary
                    make_sure_word_is_in_vocab(a_word)
                    # don't want to clean any more
                    spoken, written = sr_interface.spoken_written_form(
                        a_word, clean_written=0, clean_spoken=0)
                    if spoken != written:
                        # don't want to do this any more
                        # written = sr_interface.clean_written_form(written, clean_for='sr')
                        words = words + [sr_interface.vocabulary_entry(spoken, written)]
                    else:
                        words = words + [written]
            else:
                words = re.split('\s+', utterance)
                for a_word in words:
                    make_sure_word_is_in_vocab(a_word)

            trace('SimCmdsObj.say', 'words=%s' % words)
            # for word in words:
            #     print word, natlink.getWordInfo(word)
            # print '-- mediator.say: words=%s' % words
            sys.stderr.flush()

            #
            # During interactive sessions, may need to pause a few seconds before
            # doing *recognitionMimic*, to give user time to switch to the editor
            # window.
            #
            if sleep_before_recognitionMimic:
                print '\n\n********************\nPlease click on the editor window before I "say" your utterance.\nYou have %s seconds to do so.\n********************' % sleep_before_recognitionMimic
                time.sleep(sleep_before_recognitionMimic)
                sys.stderr.flush()

            natlink.recognitionMimic(words)
            sys.stderr.flush()

            if not self.app.alive:
                trace('SimCmdsObj.say', 'about to raise socket error')
                sys.stderr.flush()
                raise SocketError("socket connection broken during callbacks")

            if self.should_exit:
                trace('SimCmdsObj.say', 'cancelling testing')
                sys.stderr.flush()
                raise mediator_exceptions.CancelTesting()
    finally:
        sys.stderr.flush()
        #
        # Redirect stdin back to what it was
        #
        if user_input:
            sys.stdin = old_stdin
            if not (temp_file is None):
                temp_file.close()
def gotResults_testrule(self, words, fullResults):
    """special for testing the synchronisation between dictObj and real window

    one: just dictate a few words
    two: dictate on two lines (with empty in between)
    three: select the word "second" on the second line of text
    four: put a larger text in the window (so it scrolls) and select text.
    """
    dct = self.dictobj
    dct.clearBoth()
    test = words[-1]
    if test == 'all':
        # get list of keywords in the inifile:
        tests = self.ini.get('global_tests')
        if tests:
            print('testwords: %s' % tests)
            for lastWord in tests:
                natlink.recognitionMimic(["global", "dictation", "test", lastWord])
            return
        else:
            print('found no "global dictation tests"')
            return
    if test == "one":
        # test just one line of text, to be found in dictobj and in actual
        # window (through messagefunctions)
        natlink.recognitionMimic(["hello", "test", "one"])
        time.sleep(0.5)
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        messText = dct.getWindowText()
        dct.updateState()
        afterLength = dct.dictObj.getLength()
        afterText = dct.dictObj.getText(0, beforeLength)
        expected = ["Hello test one " + dct.aftertext,
                    "Hello test one" + dct.aftertext]  # 0: other window, 1: window has focus
        if not self.assert_equal_strings(expected, beforeText,
                "test %s; beforeText (dctobj) matches expected??" % test):
            return
        if not self.assert_equal_strings(beforeText, messText,
                "test %s; beforeText, messText equal test" % test):
            return
        if not self.assert_equal_strings(beforeText, afterText,
                "test %s; beforeText, afterText equal test" % test):
            return
        print('test %s OK--' % test)
        return 1  # OK
    elif test in ('two', 'three'):
        thirdWord = "second"
        if test == 'three':
            thirdWord = "third"
        # send a three line (two paragraph) text, and test the result:
        natlink.recognitionMimic(["hello", "\\New-Paragraph", thirdWord, "test"])
        time.sleep(0.5)
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        messText = dct.getWindowText()
        dct.updateState()
        if test in ('two', 'three'):
            afterLength = dct.dictObj.getLength()
            afterText = dct.dictObj.getText(0, beforeLength)
        expected = ["Hello\r\r%s test %s" % (thirdWord.capitalize(), dct.aftertext),
                    "Hello\r\r%s test%s" % (thirdWord.capitalize(), dct.aftertext)]  # 0: other window, 1: window has focus
        if not self.assert_equal_strings(expected, beforeText,
                "test %s; beforeText (dctobj) matches expected??" % test):
            return
        if not self.assert_equal_strings(beforeText, messText,
                "test %s; beforeText, messText equal test" % test):
            return
        if not self.assert_equal_strings(beforeText, afterText,
                "test %s; beforeText, afterText equal test" % test):
            return
        if test == 'two':
            print('test %s OK--' % test)
            return 1
        if test == 'three':
            # select a word on the third line (second paragraph)
            natlink.recognitionMimic(["select", "third"])
            time.sleep(0.5)
            beforeSel = dct.dictObj.getTextSel()
            dctSelection = beforeText[beforeSel[0]:beforeSel[1]]
            messSel = mess.getSelection(dct.ctrl)
            messSelection = messText[messSel[0]:messSel[1]]
            expected = "Third "
            if not self.assert_equal_strings(expected, dctSelection,
                    "test %s; selection from dctobj " % test):
                return
            if not self.assert_equal_strings(expected, messSelection,
                    "test %s; selection from message window " % test):
                return
            print('test %s OK--' % test)
            return 1  # OK
    if test == "four":
        natlink.recognitionMimic(["test", "four", "\\New-Paragraph", "there", "we", "go"])
        natlink.recognitionMimic(["insert", "test", "fracture"])
        natlink.recognitionMimic(["hello", "after", "fracture"])
        natlink.recognitionMimic(["select", "hello", "after"])
        time.sleep(0.5)
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        beforeSel = dct.dictObj.getTextSel()
        dctSelection = beforeText[beforeSel[0]:beforeSel[1]]
        messText = dct.getWindowText()
        messSel = mess.getSelection(dct.ctrl)
        messSelection = messText[messSel[0]:messSel[1]]
        expected = "Hello after "
        if not self.assert_equal_strings(expected, dctSelection,
                "test %s; selection from dctobj " % test):
            return
        if not self.assert_equal_strings(expected, messSelection,
                "test %s; selection from message window " % test):
            return
        print('test %s OK--' % test)
        return 1  # OK
    if test == 'five':
        natlink.recognitionMimic(["test", ",\\comma", "five"])
        natlink.recognitionMimic(["select", "test"])
        natlink.recognitionMimic(["hello", "again", "five"])
        self.wait()
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        beforeSel = dct.dictObj.getTextSel()
        messText = dct.getWindowText()
        messSel = mess.getSelection(dct.ctrl)
        expected = "Hello again, five"
        if not self.assert_equal_strings(expected, beforeText,
                "test %s; text from dctobj " % test):
            return
        if not self.assert_equal_strings(expected, messText,
                "test %s; text from window " % test):
            return
        print('test %s OK--' % test)
        return 1  # OK
    if test == 'six':
        # try to make a field, and see if next dictate removes the field and
        # caps the first char
        natlink.recognitionMimic(["insert", "impression"])
        dct.sendKey(" ")
        dct.sendKey("[")
        dct.sendKey("x")
        dct.sendKey("]")
        natlink.recognitionMimic(["select", "x\\xray"])
        self.wait()
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        beforeSel = dct.dictObj.getTextSel()
        messText = dct.getWindowText()
        messSel = mess.getSelection(dct.ctrl)
        expected = '\r\rImpression: [x]\r'
        if not self.assert_equal_strings(expected, beforeText,
                "test %s; text from dctobj " % test):
            return
        if not self.assert_equal_strings(expected, messText,
                "test %s; text from window " % test):
            return
        dctSelection = beforeText[beforeSel[0]:beforeSel[1]]
        messSelection = messText[messSel[0]:messSel[1]]
        expected = "[x]"
        if not self.assert_equal_strings(expected, dctSelection,
                "test %s; selection from dctobj (selected field) " % test):
            return
        if not self.assert_equal_strings(expected, messSelection,
                "test %s; selection from message window (selected field) " % test):
            return
        print('test %s OK--' % test)
        return 1  # OK
    if test == 'seven':
        # try to make a field, and see if next dictate removes the field and
        # caps the first char
        natlink.recognitionMimic(["insert", "findings"])
        dct.sendKey(" ")
        dct.sendKey("[")
        dct.sendKey("x")
        dct.sendKey("]")
        natlink.recognitionMimic(["select", "x\\xray"])
        self.wait()
        natlink.recognitionMimic(["insert", "test", "bone"])
        beforeLength = dct.dictObj.getLength()
        beforeText = dct.dictObj.getText(0, beforeLength)
        beforeSel = dct.dictObj.getTextSel()
        messText = dct.getWindowText()
        messSel = mess.getSelection(dct.ctrl)
        expected = '\r\rImpression:\r\r1. Text in field \r'
        if not self.assert_equal_strings(expected, beforeText,
                "test %s; text from dctobj " % test):
            return
        if not self.assert_equal_strings(expected, messText,
                "test %s; text from window " % test):
            return
        print('test %s OK--' % test)
        return 1  # OK
def gotResults_notListening(self, words, fullResults):
    self.activateSet(['normalState'], exclusive=0)
    natlink.recognitionMimic(["switch", "to", "spell", "mode"])
def testSimple(self):
    # recognitionMimic takes a single list of words, not separate arguments
    natlink.recognitionMimic(["Kaiser", "dictation", "test", "one"])