def get_active_explorer(self):
    """Return the Explorer window object matching the current foreground
    window, or None (with a warning) when no match is found."""
    foreground_handle = Window.get_foreground().handle
    for explorer in collection_iter(self.application.Windows()):
        if explorer.HWND == foreground_handle:
            return explorer
    self._log.warning("%s: no active explorer." % self)
    return None
def mimic(self, words):
    """
    Mimic a recognition of the given *words*.

    :param words: a string of words, or an iterable of word strings
    :raises TypeError: if *words* is not a string or other iterable
    :raises MimicFailure: if no grammar rule matches the words
    """
    # Handle string input.
    if isinstance(words, string_types):
        words = words.split()

    # Don't allow non-iterable objects.  NOTE: the previous check
    # `if not iter(words)` could never fire, because iter() itself raises
    # TypeError for non-iterables and returns a truthy iterator otherwise.
    # Probe with try/except so the intended error message is raised.
    try:
        iter(words)
    except TypeError:
        raise TypeError("%r is not a string or other iterable object"
                        % words)

    # Notify observers that a recognition has begun.
    self._recognition_observer_manager.notify_begin()

    # Generate the input for process_words.
    words_rules = self.generate_words_rules(words)

    # Call process_begin and process_words for all grammar wrappers,
    # stopping early if processing occurred.
    fg_window = Window.get_foreground()
    processing_occurred = False
    for wrapper in self._grammar_wrappers.values():
        wrapper.process_begin(fg_window)
        processing_occurred = wrapper.process_words(words_rules)
        if processing_occurred:
            break

    # If no processing occurred, then the mimic failed.
    if not processing_occurred:
        self._recognition_observer_manager.notify_failure()
        raise MimicFailure("No matching rule found for words %r."
                           % (words,))
def value(self, node):
    """Return the Window named by the "win_names" child of *node*, falling
    back to the current foreground window when no name was spoken."""
    if not node.has_child_with_name("win_names"):
        return Window.get_foreground()
    window = node.get_child_by_name("win_names").value()
    if isinstance(window, Window):
        return window
    return get_default_window(window)
def paste_command():
    """Paste from the clipboard, using the release action when a console
    window is in the foreground and Ctrl+V otherwise."""
    # Add Command Prompt, putty, ...?
    console_context = AppContext(executable="console")
    foreground = Window.get_foreground()
    in_console = console_context.matches(foreground.executable,
                                         foreground.title,
                                         foreground.handle)
    if in_console:
        return release.execute()
    Key("c-v/3").execute()
def reload_natlink():
    """Reloads Natlink and custom Python modules."""
    # Remember the active window so focus can be restored afterwards.
    previous = Window.get_foreground()
    FocusWindow(executable="natspeak",
                title="Messages from Python Macros").execute()
    Pause("10").execute()
    Key("a-f, r").execute()
    Pause("10").execute()
    previous.set_foreground()
def get_active_window_info():
    '''Returns foreground window executable_file, executable_path, title,
    handle, classname'''
    FILENAME_PATTERN = re.compile(r"[/\\]([\w_ ]+\.[\w]+)")
    window = Window.get_foreground()
    executable_path = str(Path(get_active_window_path()))
    # Extract the bare file name from the executable path; None when the
    # pattern finds no match.  Truthiness replaces the `len(...) > 0` check.
    matches = FILENAME_PATTERN.findall(window.executable)
    executable_file = matches[0] if matches else None
    return [executable_file, executable_path, window.title, window.handle,
            window.classname]
def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread,
             dwmsEventTime):
    """WinEvent hook: when the event's window is the foreground window,
    re-run process_begin for every grammar except the observer grammar."""
    foreground = Window.get_foreground()
    if hwnd != foreground.handle:
        return
    for grammar in engine.grammars:
        # Prevent 'notify_begin()' from being called.
        if grammar.name == "_recobs_grammar":
            continue
        grammar.process_begin(foreground.executable, foreground.title,
                              foreground.handle)
def on_begin(self):
    """Swap grammar state sets when the dynamic context activates or
    deactivates for the current foreground window."""
    window = Window.get_foreground()
    # Exactly one context query happens per call, as in the original
    # (short-circuiting meant the two branches never both evaluated it).
    context_active = self.context.is_dynamic_active(
        window.executable, window.title, window.handle)
    if self.is_dynamic_active and not context_active:
        # Leaving the dynamic context: stash current states, load static.
        self.is_dynamic_active = False
        self.states_to_restore_on_window_focus = \
            self.get_current_grammar_states()
        self.set_current_grammar_states(self.static_grammar_states)
    elif not self.is_dynamic_active and context_active:
        # Entering the dynamic context: stash static states, restore stash.
        self.is_dynamic_active = True
        self.static_grammar_states = self.get_current_grammar_states()
        self.set_current_grammar_states(
            self.states_to_restore_on_window_focus)
def ensure_execution_context(data):
    '''Populates the data field of execute with context information if not
    present.'''
    if data is None:
        data = {}
    # Each factory is invoked only when its key is absent, preserving the
    # original lazy evaluation (setdefault would call them eagerly).
    lazy_defaults = (
        ('_proxy', aenea.config.proxy_active),
        ('_server_info', aenea.proxy_contexts._server_info),
        ('_proxy_context', aenea.proxy_contexts._get_context),
        ('_context', Window.get_foreground),
    )
    for key, factory in lazy_defaults:
        if key not in data:
            data[key] = factory()
    return data
def process_begin(self):
    """ Start the dragonfly grammar processing. """
    # Get context info for the process_begin method.  Dragonfly has a
    # handy static method for this:
    window = Window.get_foreground()
    # Note: get_foreground() is mocked for non-Windows platforms
    # TODO Change to process_begin once cross platform contexts are working
    process_method = (self.grammar.process_begin
                      if sys.platform.startswith("win")
                      else self.grammar._process_begin)
    # Call the process begin method
    process_method(window.executable, window.title, window.handle)
def _speech_start_callback(self, mimicking):
    """Handle speech start: prime grammar contexts, trim buffered audio
    (unless mimicking) and notify recognition observers."""
    # Get context info. Dragonfly has a handy static method for this:
    foreground = Window.get_foreground()

    # Call process_begin for all grammars so that any out of context
    # grammar will not be used.
    for wrapper in self._grammar_wrappers.values():
        wrapper.process_begin(foreground)

    if not mimicking:
        # Trim excess audio buffers from the start of the list.  Keep a
        # maximum of 1 second of silence before speech start was detected.
        # This should help increase the performance of batch reprocessing
        # later.
        seconds_to_keep = 1
        n_keep = int(self.config.RATE / self.config.FRAMES_PER_BUFFER
                     * seconds_to_keep)
        self._audio_buffers = self._audio_buffers[-n_keep:]

    # Notify observers
    self._recognition_observer_manager.notify_begin()
def minimize_window():
    ''' Minimize foreground Window '''
    foreground = Window.get_foreground()
    foreground.minimize()
def maximize_window():
    ''' Maximize foreground Window '''
    foreground = Window.get_foreground()
    foreground.maximize()
def get_active_window_path():
    '''Return the executable path of the current foreground window.'''
    foreground = Window.get_foreground()
    return foreground.executable
def get_active_window_title():
    '''Return the title of the current foreground window.'''
    foreground = Window.get_foreground()
    return foreground.title
def move_current_window_to_desktop(n=1, follow=False):
    '''Move the foreground window to virtual desktop *n*; when *follow* is
    true, switch to that desktop as well.'''
    handle = Window.get_foreground().handle
    pyvda.MoveWindowToDesktopNumber(handle, n)
    if follow:
        go_to_desktop_number(n)
def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread,
             dwmsEventTime):
    """WinEvent hook: forward foreground window changes to the engine."""
    foreground = Window.get_foreground()
    if hwnd != foreground.handle:
        return
    engine.window_change(foreground.executable, foreground.title,
                         foreground.handle)
def _process_recognition(self, node, extras):
    """Assign the spoken name to the foreground window and record it in
    the win_names mapping for later recall."""
    name = str(extras["name"])
    foreground = Window.get_foreground()
    foreground.name = name
    win_names[name] = foreground
    self._log.debug("%s: named foreground window '%s'." % (self, foreground))
def blitz_natlink_status():
    '''Focus the NatLink messages window briefly, then restore focus to the
    previously active window.'''
    previous = Window.get_foreground()
    FocusWindow(executable="natspeak",
                title="Messages from NatLink").execute()
    Pause("100").execute()
    previous.set_foreground()
def windowinfo():
    '''Print the title and executable of the current foreground window.'''
    foreground = Window.get_foreground()
    print(foreground.title)
    print(foreground.executable)
def move_current_window_to_desktop(n=0, follow=False):
    '''Move the foreground window to desktop *n*; when *follow* is true,
    switch to that desktop as well.'''
    handle = Window.get_foreground().handle
    # NOTE(review): n appears to be 1-based here while vda uses zero-based
    # indices (hence n - 1) — confirm against callers.
    vda.MoveWindowToDesktopNumber(handle, n - 1)
    if follow:
        vda.GoToDesktopNumber(n - 1)
def mimic(self, words, **kwargs):
    """
    Mimic a recognition of the given *words*.

    :param words: words to mimic
    :type words: str|iter

    :Keyword Arguments: optional *executable*, *title* and/or *handle*
       keyword arguments may be used to simulate a specific foreground
       window context.  The current foreground window attributes will be
       used instead for any keyword arguments not present.

    .. note::

       Any dictation words should be all uppercase, e.g. "HELLO WORLD".
       Dictation words not in uppercase will result in the engine **not**
       decoding and recognizing the command!
    """
    # Handle string input.
    if isinstance(words, string_types):
        words = words.split()

    # Don't allow non-iterable objects.  NOTE: the previous check
    # `if not iter(words)` could never fire, because iter() itself raises
    # TypeError for non-iterables and returns a truthy iterator otherwise.
    # Probe with try/except so the intended error message is raised.
    try:
        iter(words)
    except TypeError:
        raise TypeError("%r is not a string or other iterable object"
                        % words)

    # Fail on empty input.
    if not words:
        raise MimicFailure("Invalid mimic input %r" % words)

    # Notify observers that a recognition has begun.
    self._recognition_observer_manager.notify_begin()

    # Generate the input for process_words.
    words_rules = self.generate_words_rules(words)
    w = Window.get_foreground()
    process_args = {
        "executable": w.executable,
        "title": w.title,
        "handle": w.handle,
    }

    # Allows optional passing of window attributes to mimic
    process_args.update(kwargs)

    # Call process_begin() for each grammar wrapper. Use a copy of
    # _grammar_wrappers in case it changes.
    for wrapper in self._grammar_wrappers.copy().values():
        wrapper.process_begin(**process_args)

    # Take another copy of _grammar_wrappers to use for processing.
    grammar_wrappers = self._grammar_wrappers.copy().values()

    # Count exclusive grammars.
    exclusive_count = 0
    for wrapper in grammar_wrappers:
        if wrapper.exclusive:
            exclusive_count += 1

    # Call process_words() for each grammar wrapper, stopping early if
    # processing occurred.
    processing_occurred = False
    for wrapper in grammar_wrappers:
        # Skip non-exclusive grammars if there are one or more exclusive
        # grammars.
        if exclusive_count > 0 and not wrapper.exclusive:
            continue

        # Process the grammar.
        processing_occurred = wrapper.process_words(words_rules)
        if processing_occurred:
            break

    # If no processing occurred, then the mimic failed.
    if not processing_occurred:
        self._recognition_observer_manager.notify_failure(None)
        raise MimicFailure("No matching rule found for words %r."
                           % (words,))
def close_window():
    ''' Close foreground Window '''
    foreground = Window.get_foreground()
    foreground.close()
def target_foreground_window(self):
    """ Set the target window to the current foreground window. """
    foreground = Window.get_foreground()
    self.target_window = foreground
def value(self, node):
    """Return the window named in the recognition; when "this" was spoken,
    left-click first (presumably to focus the window under the pointer —
    confirm) and return the resulting foreground window."""
    if node.has_child_with_name("window_name"):
        return node.get_child_by_name("window_name").value()
    spoken_first = node.words()[0]
    if spoken_first == "this":
        Mouse("left/3").execute()
    return Window.get_foreground()
def mimic(self, words, **kwargs):
    """
    Mimic a recognition of the given *words*.

    :param words: words to mimic (string or iterable of word strings)
    :raises TypeError: if *words* is not a string or other iterable
    :raises MimicFailure: if *words* is empty or no grammar rule matches
    """
    # Handle string input.
    if isinstance(words, string_types):
        words = words.split()

    # Don't allow non-iterable objects.  NOTE: the previous check
    # `if not iter(words)` could never fire, because iter() itself raises
    # TypeError for non-iterables and returns a truthy iterator otherwise.
    # Probe with try/except so the intended error message is raised.
    try:
        iter(words)
    except TypeError:
        raise TypeError("%r is not a string or other iterable object"
                        % words)

    # Fail on empty input.
    if not words:
        raise MimicFailure("Invalid mimic input %r" % words)

    # Notify observers that a recognition has begun.
    self._recognition_observer_manager.notify_begin()

    # Generate the input for process_words.
    words_rules = self.generate_words_rules(words)
    w = Window.get_foreground()
    process_args = {
        "executable": w.executable,
        "title": w.title,
        "handle": w.handle,
    }

    # Allows optional passing of window attributes to mimic
    process_args.update(kwargs)

    # Call process_begin() for each grammar wrapper. Use a copy of
    # _grammar_wrappers in case it changes.
    for wrapper in self._grammar_wrappers.copy().values():
        wrapper.process_begin(**process_args)

    # Take another copy of _grammar_wrappers to use for processing.
    grammar_wrappers = self._grammar_wrappers.copy().values()

    # Count exclusive grammars.
    exclusive_count = 0
    for wrapper in grammar_wrappers:
        if wrapper.exclusive:
            exclusive_count += 1

    # Call process_words() for each grammar wrapper, stopping early if
    # processing occurred.
    processing_occurred = False
    for wrapper in grammar_wrappers:
        # Skip non-exclusive grammars if there are one or more exclusive
        # grammars.
        if exclusive_count > 0 and not wrapper.exclusive:
            continue

        # Process the grammar.
        processing_occurred = wrapper.process_words(words_rules)
        if processing_occurred:
            break

    # If no processing occurred, then the mimic failed.
    if not processing_occurred:
        self._recognition_observer_manager.notify_failure()
        raise MimicFailure("No matching rule found for words %r."
                           % (words, ))