Example #1
def minimize_window():
    '''
    Minimize the foreground window and remember it for restore_window().
    '''
    global lasthandle
    lasthandle = Window.get_foreground()
    Window.get_foreground().minimize()
Example #2
def restore_window():
    '''
    Restore the last window minimized by minimize_window().
    '''
    global lasthandle
    if lasthandle is None:
        printer.out("No previous window minimized by voice")
    else:
        Window.restore(lasthandle)
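The two helpers above expect a module-level `lasthandle` and are meant to be bound to spoken commands. A minimal sketch of that wiring, assuming standard dragonfly imports; the rule class, grammar name and spoken phrases are placeholders:

# Sketch only: module-level state plus a grammar binding for the helpers above.
from dragonfly import Grammar, MappingRule, Function

lasthandle = None   # consulted by restore_window() above

class WindowControlRule(MappingRule):
    mapping = {
        "minimize window": Function(minimize_window),
        "restore window":  Function(restore_window),
    }

grammar = Grammar("window control")   # hypothetical grammar name
grammar.add_rule(WindowControlRule())
grammar.load()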
Example #3
 def get_active_explorer(self):
     handle = Window.get_foreground().handle
     for window in collection_iter(self.application.Windows()):
         if window.HWND == handle:
             return window
     self._log.warning("%s: no active explorer." % self)
     return None
Example #4
 def value(self, node):
     if node.has_child_with_name("win_names"):
         window = node.get_child_by_name("win_names").value()
         if not isinstance(window, Window):
             window = get_default_window(window)
         return window
     return Window.get_foreground()
Example #5
def focus_mousegrid(gridtitle):
    '''
    Polls the active windows for the MouseGrid window title (Issue #171).
    When the MouseGrid window title is found, focuses the MouseGrid overlay.
    '''
    if sys.platform.startswith('win'):
        # May not be needed for Linux/Mac OS - testing required
        try:
            for i in range(9):
                matches = Window.get_matching_windows(title=gridtitle,
                                                      executable="python")
                if not matches:
                    Pause("50").execute()
                else:
                    break
            if matches:
                for handle in matches:
                    handle.set_foreground()
                    break
            else:
                printer.out(
                    "Title `{}`: no matching windows found".format(gridtitle))
        except Exception as e:
            printer.out("Error focusing MouseGrid: {}".format(e))
    else:
        pass
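The retry loop above is a reusable pattern for windows that take a moment to appear. A hedged generalization; the helper name, retry count and pause length are assumptions, not part of the example:

# Sketch: poll Window.get_matching_windows() until a title matches or we give up.
from dragonfly import Window, Pause

def wait_for_window(title, retries=9, pause="50"):
    """Return the first window whose title matches, or None after `retries` polls."""
    for _ in range(retries):
        matches = Window.get_matching_windows(title=title)
        if matches:
            return matches[0]
        Pause(pause).execute()   # short pause between attempts
    return None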
Example #6
    def mimic(self, words):
        """ Mimic a recognition of the given *words*. """
        # Handle string input.
        if isinstance(words, string_types):
            words = words.split()

        # Don't allow non-iterable objects.
        if not iter(words):
            raise TypeError("%r is not a string or other iterable object" %
                            words)

        # Notify observers that a recognition has begun.
        self._recognition_observer_manager.notify_begin()

        # Generate the input for process_words.
        words_rules = self.generate_words_rules(words)

        # Call process_begin and process_words for all grammar wrappers,
        # stopping early if processing occurred.
        fg_window = Window.get_foreground()
        processing_occurred = False
        for wrapper in self._grammar_wrappers.values():
            wrapper.process_begin(fg_window)
            processing_occurred = wrapper.process_words(words_rules)
            if processing_occurred:
                break

        # If no processing occurred, then the mimic failed.
        if not processing_occurred:
            self._recognition_observer_manager.notify_failure()
            raise MimicFailure("No matching rule found for words %r." %
                               (words, ))
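From ordinary command-module code, mimic() is normally reached through the engine object. A brief usage sketch; the phrase is a placeholder and the MimicFailure import location is an assumption that may vary between dragonfly versions:

# Sketch: mimic a recognition and handle the no-matching-rule case.
from dragonfly import get_engine
try:
    from dragonfly import MimicFailure                 # newer layouts (assumption)
except ImportError:
    from dragonfly.engines.base import MimicFailure    # older layouts (assumption)

engine = get_engine()
try:
    engine.mimic("open notepad")                       # placeholder phrase
except MimicFailure:
    print("No rule matched the mimicked words.")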
Example #7
    def _speech_start_callback(self, mimicking):
        # Get context info.
        fg_window = Window.get_foreground()
        window_info = {
            "executable": fg_window.executable,
            "title": fg_window.title,
            "handle": fg_window.handle,
        }

        # Call process_begin for all grammars so that any out of context
        # grammar will not be used.
        for wrapper in self._grammar_wrappers.copy().values():
            wrapper.process_begin(**window_info)

        if not mimicking:
            # Trim excess audio buffers from the start of the list. Keep a maximum 1
            # second of silence before speech start was detected. This should help
            # increase the performance of batch reprocessing later.
            chunk = self.config.FRAMES_PER_BUFFER
            rate = self.config.RATE
            seconds = 1
            n_buffers = int(rate / chunk * seconds)
            self._audio_buffers = self._audio_buffers[-1 * n_buffers:]

        # Notify observers
        self._recognition_observer_manager.notify_begin()
Example #8
def get_window_by_title(title=None):
    # returns 0 if nothing found
    matches = Window.get_matching_windows(title=title)
    if matches:
        return matches[0].handle
    else:
        return 0
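A possible caller, shown only as a sketch; the title is a placeholder, and Window.get_window() turning a handle back into a Window object is the same call used in the scratch-memory example further down the page:

# Sketch: focus a window located by title, if one exists.
handle = get_window_by_title("Untitled - Notepad")   # placeholder title
if handle:
    Window.get_window(handle).set_foreground()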
Example #9
 def _process_recognition(self, node, extras):
     name = str(extras["name"])
     window = Window.get_foreground()
     window.name = name
     window_names[name] = window
     self._log.debug("%s: named foreground window '%s'." %
                     (self, window))
Example #10
    def disconnect(self):
        """ Disconnect from natlink. """
        # Unload all grammars from the engine so that Dragon doesn't keep
        # recognizing them.
        for grammar in self.grammars:
            grammar.unload()

        # Close the waitForSpeech() dialog box if it is active.
        from dragonfly import Window
        target_title = "Natlink / Python Subsystem"
        for window in Window.get_matching_windows(title=target_title):
            if window.is_visible:
                try:
                    window.close()
                except pywintypes.error:
                    pass
                break

        # Stop the special timer thread if it is running.
        if self._timer_thread:
            self._timer_thread.stop()
            self._timer_thread = None

        # Finally disconnect from natlink.
        self.natlink.natDisconnect()
Example #11
 def value(self, node):
     if node.has_child_with_name("win_names"):
         window = node.get_child_by_name("win_names").value()
         if not isinstance(window, Window):
             window = get_default_window(window)
         return window
     return Window.get_foreground()
Example #12
 def get_active_explorer(self):
     handle = Window.get_foreground().handle
     for window in collection_iter(self.application.Windows()):
         if window.HWND == handle:
             return window
     self._log.warning("%s: no active explorer." % self)
     return None
Example #13
    def mimic(self, words):
        """ Mimic a recognition of the given *words*. """
        # Handle string input.
        if isinstance(words, string_types):
            words = words.split()

        # Don't allow non-iterable objects.
        if not iter(words):
            raise TypeError("%r is not a string or other iterable object"
                            % words)

        # Notify observers that a recognition has begun.
        self._recognition_observer_manager.notify_begin()

        # Generate the input for process_words.
        words_rules = self.generate_words_rules(words)

        # Call process_begin and process_words for all grammar wrappers,
        # stopping early if processing occurred.
        fg_window = Window.get_foreground()
        processing_occurred = False
        for wrapper in self._grammar_wrappers.values():
            wrapper.process_begin(fg_window)
            processing_occurred = wrapper.process_words(words_rules)
            if processing_occurred:
                break

        # If no processing occurred, then the mimic failed.
        if not processing_occurred:
            self._recognition_observer_manager.notify_failure()
            raise MimicFailure("No matching rule found for words %r."
                               % (words,))
Example #14
    def mimic(self, words, **kwargs):
        """ Mimic a recognition of the given *words*. """
        # Handle string input.
        if isinstance(words, string_types):
            words = words.split()

        # Don't allow non-iterable objects.
        if not iter(words):
            raise TypeError("%r is not a string or other iterable object" %
                            words)

        # Notify observers that a recognition has begun.
        self._recognition_observer_manager.notify_begin()

        # Generate the input for process_words.
        words_rules = self.generate_words_rules(words)

        w = Window.get_foreground()
        process_args = {
            "executable": w.executable,
            "title": w.title,
            "handle": w.handle,
        }
        # Allows optional passing of window attributes to mimic
        process_args.update(kwargs)

        # Call process_begin() for each grammar wrapper. Use a copy of
        # _grammar_wrappers in case it changes.
        for wrapper in self._grammar_wrappers.copy().values():
            wrapper.process_begin(**process_args)

        # Take another copy of _grammar_wrappers to use for processing.
        grammar_wrappers = self._grammar_wrappers.copy().values()

        # Count exclusive grammars.
        exclusive_count = 0
        for wrapper in grammar_wrappers:
            if wrapper.exclusive:
                exclusive_count += 1

        # Call process_words() for each grammar wrapper, stopping early if
        # processing occurred.
        processing_occurred = False
        for wrapper in grammar_wrappers:
            # Skip non-exclusive grammars if there are one or more exclusive
            # grammars.
            if exclusive_count > 0 and not wrapper.exclusive:
                continue

            # Process the grammar.
            processing_occurred = wrapper.process_words(words_rules)
            if processing_occurred:
                break

        # If no processing occurred, then the mimic failed.
        if not processing_occurred:
            self._recognition_observer_manager.notify_failure()
            raise MimicFailure("No matching rule found for words %r." %
                               (words, ))
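The exclusive-grammar handling above only comes into play once some grammar has been flagged exclusive. A hedged sketch of how that flag is usually set on a dragonfly Grammar; the grammar and its rule are hypothetical:

# Sketch: an exclusive grammar suppresses all non-exclusive grammars while active,
# which is exactly what the exclusive_count logic in mimic() accounts for.
from dragonfly import Grammar, MappingRule, Function

sleep_grammar = Grammar("sleep mode")                 # hypothetical grammar
sleep_grammar.add_rule(MappingRule(name="wake", mapping={
    "wake up": Function(lambda: sleep_grammar.set_exclusiveness(False)),
}))
sleep_grammar.load()
sleep_grammar.set_exclusiveness(True)                 # only this grammar is considered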
Example #15
 def _process_recognition(self, node, extras):  # Callback when command is spoken.
     windows = Window.get_all_windows()
     #windows.sort(key=lambda x: x.executable)
     for window in windows:
         if utils.windowIsValid(window):
             executable = unicode(window.executable, errors='ignore').lower()
             title = unicode(window.title, errors='ignore').lower()
             print "{:7} : {:75} : {}".format(window.handle, executable, title)
Example #16
def paste_command():
    # Add Command Prompt, putty, ...?
    context = AppContext(executable="console")
    window = Window.get_foreground()
    if context.matches(window.executable, window.title, window.handle):
        return
    release.execute()
    Key("c-v/3").execute()
Example #17
def move_current_to_new(follow=False):
    wndh = Window.get_foreground().handle
    current = vda.GetCurrentDesktopNumber()
    total = vda.GetDesktopCount()
    Key("wc-d").execute()
    vda.MoveWindowToDesktopNumber(wndh, total)
    if not follow:
        vda.GoToDesktopNumber(current)
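Using only the VirtualDesktopAccessor calls that already appear above, a companion helper that sends the foreground window to an arbitrary desktop might look like this; the name and the zero-based numbering convention are inferred from the example, not confirmed:

# Sketch: move the foreground window to desktop number `n`, optionally following it.
def move_current_to_desktop(n, follow=False):
    wndh = Window.get_foreground().handle
    vda.MoveWindowToDesktopNumber(wndh, n)   # moving does not switch the active desktop
    if follow:
        vda.GoToDesktopNumber(n)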
Example #18
def get_application():
    window = Window.get_foreground()
    # Check all contexts. Return the name of the first one that matches or
    # "standard" if none matched.
    for name, context in contexts.items():
        if context.matches(window.executable, window.title, window.handle):
            return name
    return "standard"
Example #19
def paste_command():
    # Add Command Prompt, putty, ...?
    context = AppContext(executable="console")
    window = Window.get_foreground()
    if context.matches(window.executable, window.title, window.handle):
        return
    release.execute()
    Key("c-v/3").execute()
Example #20
def reload_natlink():
    """Reloads Natlink and custom Python modules."""
    win = Window.get_foreground()
    FocusWindow(executable="natspeak",
        title="Messages from Python Macros").execute()
    Pause("10").execute()
    Key("a-f, r").execute()
    Pause("10").execute()
    win.set_foreground()
Example #21
 def _process_recognition(self, node,
                          extras):  # Callback when command is spoken.
     windows = Window.get_all_windows()
     #windows.sort(key=lambda x: x.executable)
     for window in windows:
         if utils.windowIsValid(window):
             print "{:7} : {:75} : {}".format(
                 window.handle, window.executable.encode("utf-8"),
                 window.title.encode("utf-8"))
Example #22
def reload_natlink():
    """Reloads Natlink and custom Python modules."""
    win = Window.get_foreground()
    FocusWindow(executable="natspeak",
                title="Messages from Python Macros").execute()
    Pause("10").execute()
    Key("a-f, r").execute()
    Pause("10").execute()
    win.set_foreground()
Example #23
def get_active_window_info():
    '''Returns foreground window executable_file, executable_path, title, handle, classname'''
    FILENAME_PATTERN = re.compile(r"[/\\]([\w_ ]+\.[\w]+)")
    window = Window.get_foreground()
    executable_path = str(Path(get_active_window_path()))
    match_object = FILENAME_PATTERN.findall(window.executable)
    executable_file = None
    if len(match_object) > 0:
        executable_file = match_object[0]
    return [executable_file, executable_path, window.title, window.handle, window.classname]
Example #24
 def check_context(self):
     if natlink.getMicState() == "on":
         w = Window.get_foreground()
         should_be_command = self.command_contexts.matches(w.executable, w.title, w.handle)
         if should_be_command and self.mode == "normal":
             self.switch_mode("command")
         elif not should_be_command and self.mode == "command":
             self.switch_mode("normal")
         else:
             pass
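For the mode switching to happen, check_context() has to be called periodically. One hedged way to do that is the engine's timer facility; `tracker` stands in for whatever object owns check_context(), and the half-second interval is an assumption:

# Sketch: poll check_context() on a repeating engine timer (interval in seconds).
from dragonfly import get_engine

timer = get_engine().create_timer(tracker.check_context, 0.5)
# timer.stop() ends the polling when the module is unloaded.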
Example #25
    def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread,
                 dwmsEventTime):
        window = Window.get_foreground()
        if hwnd == window.handle:
            for grammar in engine.grammars:
                # Prevent 'notify_begin()' from being called.
                if grammar.name == "_recobs_grammar":
                    continue

                grammar.process_begin(window.executable, window.title,
                                      window.handle)
Example #26
    def _process_recognition(self, node, extras):
        # Find a notepad window, set it as the target window and move it.
        #  Display a warning if no notepad window was found.
        notepad_windows = Window.get_matching_windows(executable="notepad",
                                                      title="notepad")
        if not notepad_windows:
            print("Could not find a notepad window to move.")
            return

        self.window_movement.target_window = notepad_windows[0]
        self.move_target_window(extras)
Example #27
def script():
    X = in_data.X
    y = in_data.Y
    feat_names = [x.name for x in in_data.domain.attributes]
    new_feat_names = [x.split('=')[-1] for x in feat_names]
    print(feat_names)
    model_type = str(in_learners).split(',')[0].split('(')[0][1:]
    is_cont = in_data.domain.class_var.is_continuous
    class_type = 'regression' if is_cont else 'classification'
    class_names = in_data.domain.class_var.values if not is_cont else None

    shap_values = in_object
    biggest_val = np.amax(np.absolute(shap_values))

    shap_values = shap_values / biggest_val
    xrange = (-1.1, 1.1)
    num_rows_2_sample = shap_values.shape[0]
    max_display = shap_values.shape[1]
    font_size = 13

    idx = np.random.choice(list(range(shap_values.shape[0])),
                           size=num_rows_2_sample,
                           replace=False)
    shap_values = shap_values[idx, :]
    X = X[idx, :]

    if type(shap_values) == list:
        shap_values = shap_values[class_val]
        title = '{0} is {1}'.format(in_data.domain.class_var.name,
                                    class_names[class_val])
    else:
        title = in_data.domain.class_var.name

    id = len(Window.get_all_windows())
    summary_plot(shap_values,
                 features=X,
                 feature_names=feat_names,
                 class_names=class_names,
                 max_display=max_display,
                 plot_type='bar',
                 color='black',
                 id=id,
                 xrange=xrange)
    summary_plot(shap_values,
                 features=X,
                 feature_names=feat_names,
                 class_names=class_names,
                 max_display=max_display,
                 plot_type='dot',
                 title=title,
                 id=id,
                 xrange=xrange)

    return None, None, None, None
Example #28
 def on_begin(self):
     window = Window.get_foreground()
     if self.is_dynamic_active and not self.context.is_dynamic_active(
             window.executable, window.title, window.handle):
         self.is_dynamic_active = False
         self.states_to_restore_on_window_focus = self.get_current_grammar_states()
         self.set_current_grammar_states(self.static_grammar_states)
     elif not self.is_dynamic_active and self.context.is_dynamic_active(
             window.executable, window.title, window.handle):
         self.is_dynamic_active = True
         self.static_grammar_states = self.get_current_grammar_states()
         self.set_current_grammar_states(
             self.states_to_restore_on_window_focus)
Example #29
def ensure_execution_context(data):
    '''Populates the data field of execute with context information if not
      present.'''
    if data is None:
        data = {}
    if '_proxy' not in data:
        data['_proxy'] = aenea.config.proxy_active()
    if '_server_info' not in data:
        data['_server_info'] = aenea.proxy_contexts._server_info()
    if '_proxy_context' not in data:
        data['_proxy_context'] = aenea.proxy_contexts._get_context()
    if '_context' not in data:
        data['_context'] = Window.get_foreground()
    return data
Example #30
def ensure_execution_context(data):
    '''Populates the data field of execute with context information if not
      present.'''
    if data is None:
        data = {}
    if '_proxy' not in data:
        data['_proxy'] = aenea.config.proxy_active()
    if '_server_info' not in data:
        data['_server_info'] = aenea.proxy_contexts._server_info()
    if '_proxy_context' not in data:
        data['_proxy_context'] = aenea.proxy_contexts._get_context()
    if '_context' not in data:
        data['_context'] = Window.get_foreground()
    return data
Example #31
 def do_scratch_n_times(self, n):
     for _ in range(n):
         try:
             # Get the number of characters to delete from the current
             # window's stack. Discard the state flags.
             scratch_number, _ = self._get_window_stack().pop()
             Key("backspace:%d" % scratch_number).execute()
         except IndexError:
             handle = self._current_window_handle
             window = Window.get_window(handle)
             exe = os.path.basename(window.executable)
             title = window.title
             print("Nothing in scratch memory for %r window "
                   "(title=%r, id=%d)" % (exe, title, handle))
             break
Example #32
def get_default_window(name):
    executable, title = default_names[name]
    if executable: executable = executable.lower()
    if title: title = title.lower()
    windows = Window.get_all_windows()
    for window in windows:
        if not window.is_visible:
            continue
        elif executable and window.executable.lower().find(executable) == -1:
            continue
        elif title and window.title.lower().find(title) == -1:
            continue
        window.name = name
        win_names[name] = window
        return window
    return None
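get_default_window() expects `default_names` to map a spoken name to an (executable, title) pair, where either element may be None to skip that filter. A sketch of a plausible table; the entries are illustrative assumptions:

# Sketch: spoken-name -> (executable substring, title substring) lookup table.
default_names = {
    "browser":  ("chrome", None),
    "terminal": ("cmd", None),
    "mail":     (None, "inbox"),
}
win_names = {}   # populated by get_default_window() and the window-naming rule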
Example #33
    def process_begin(self):
        """
        Start the dragonfly grammar processing.
        """
        # Get context info for the process_begin method. Dragonfly has a handy
        # static method for this:
        fg_window = Window.get_foreground()
        if sys.platform.startswith("win"):
            process_method = self.grammar.process_begin
        else:
            # Note: get_foreground() is mocked for non-Windows platforms
            # TODO Change to process_begin once cross platform contexts are working
            process_method = self.grammar._process_begin

        # Call the process begin method
        process_method(fg_window.executable, fg_window.title, fg_window.handle)
Example #34
def clear_log():
    # Function to clear natlink status window
    try:
        # pylint: disable=import-error
        import natlink
        windows = Window.get_all_windows()
        matching = [
            w for w in windows if b"Messages from Python Macros" in w.title
        ]
        if matching:
            handle = (matching[0].handle)
            rt_handle = win32gui.FindWindowEx(handle, None, "RICHEDIT", None)
            win32gui.SetWindowText(rt_handle, "")
            return
    except Exception as e:
        print(e)
Example #35
def get_default_window(name):
    executable, title = default_names[name]
    if executable: executable = executable.lower()
    if title: title = title.lower()
    windows = Window.get_all_windows()
    for window in windows:
        if not window.is_visible:
            continue
        elif executable and window.executable.lower().find(executable) == -1:
            continue
        elif title and window.title.lower().find(title) == -1:
            continue
        window.name = name
        win_names[name] = window
        return window
    return None
Example #36
    def _speech_start_callback(self, mimicking):
        # Get context info. Dragonfly has a handy static method for this:
        fg_window = Window.get_foreground()

        # Call process_begin for all grammars so that any out of context
        # grammar will not be used.
        for wrapper in self._grammar_wrappers.values():
            wrapper.process_begin(fg_window)

        if not mimicking:
            # Trim excess audio buffers from the start of the list. Keep a maximum 1
            # second of silence before speech start was detected. This should help
            # increase the performance of batch reprocessing later.
            chunk = self.config.FRAMES_PER_BUFFER
            rate = self.config.RATE
            seconds = 1
            n_buffers = int(rate / chunk * seconds)
            self._audio_buffers = self._audio_buffers[-1 * n_buffers:]

        # Notify observers
        self._recognition_observer_manager.notify_begin()
Example #37
def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread, dwmsEventTime):
    window = Window.get_foreground()
    if hwnd == window.handle:
        engine.window_change(window.executable, window.title, window.handle)
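A callback with this signature is what user32's SetWinEventHook expects. A hedged registration sketch for foreground-change events follows; the constants and message loop are standard Win32 boilerplate, but this is an outline rather than the code the example was taken from:

# Sketch: register `callback` above for EVENT_SYSTEM_FOREGROUND notifications.
# A message loop must run in the registering thread for the hook to fire.
import ctypes
import ctypes.wintypes

EVENT_SYSTEM_FOREGROUND = 0x0003
WINEVENT_OUTOFCONTEXT = 0x0000

WinEventProcType = ctypes.WINFUNCTYPE(
    None, ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.HWND,
    ctypes.wintypes.LONG, ctypes.wintypes.LONG, ctypes.wintypes.DWORD,
    ctypes.wintypes.DWORD)

win_event_proc = WinEventProcType(callback)   # keep a reference so it is not GC'd
hook = ctypes.windll.user32.SetWinEventHook(
    EVENT_SYSTEM_FOREGROUND, EVENT_SYSTEM_FOREGROUND,
    0, win_event_proc, 0, 0, WINEVENT_OUTOFCONTEXT)

msg = ctypes.wintypes.MSG()
while ctypes.windll.user32.GetMessageW(ctypes.byref(msg), 0, 0, 0) != 0:
    ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
    ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))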
Example #38
def blitz_natlink_status():
    win = Window.get_foreground()
    FocusWindow(executable="natspeak", title="Messages from NatLink").execute()
    Pause("100").execute()
    win.set_foreground()
Example #39
def minimize_window():
    '''
    Minimize the foreground window.
    '''
    Window.get_foreground().minimize()
Example #40
def maximize_window():
    '''
    Maximize the foreground window.
    '''
    Window.get_foreground().maximize()
Example #41
 def _process_recognition(self, node, extras):
     name = str(extras["name"])
     window = Window.get_foreground()
     window.name = name
     win_names[name] = window
     self._log.debug("%s: named foreground window '%s'." % (self, window))