Example #1
  def load(self, path = None):
    """
    Reads in the contents of the given path, adding its configuration values
    to our current contents.

    :param str path: file path to be loaded, this uses the last loaded path if
      not provided

    :raises:
      * **IOError** if we fail to read the file (it doesn't exist, insufficient
        permissions, etc)
      * **ValueError** if no path was provided and we've never been provided one
    """

    if path:
      self._path = path
    elif not self._path:
      raise ValueError("Unable to load configuration: no path provided")

    with open(self._path, "r") as config_file:
      read_contents = config_file.readlines()

    with self._contents_lock:
      self._raw_contents = read_contents
      remainder = list(self._raw_contents)

      while remainder:
        line = remainder.pop(0)

        # strips any commenting or excess whitespace
        comment_start = line.find("#")

        if comment_start != -1:
          line = line[:comment_start]

        line = line.strip()

        # parse the key/value pair
        if line:
          try:
            key, value = line.split(" ", 1)
            value = value.strip()
          except ValueError:
            log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
            key, value = line, ""

          if not value:
            # this might be a multi-line entry, try processing it as such
            multiline_buffer = []

            while remainder and remainder[0].lstrip().startswith("|"):
              content = remainder.pop(0).lstrip()[1:]  # removes '\s+|' prefix
              content = content.rstrip("\n")           # trailing newline
              multiline_buffer.append(content)

            if multiline_buffer:
              self.set(key, "\n".join(multiline_buffer), False)
              continue

          self.set(key, value, False)
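
As a minimal standalone sketch (not part of the original class), the same comment-stripping and pipe-prefixed multi-line handling can be exercised against an in-memory list of lines. The sample settings content below is made up for illustration.

# standalone sketch of the parsing rules used by load() above, run against a
# hypothetical in-memory settings snippet rather than a file

sample = [
  'user.name Galen  # inline comments are stripped\n',
  'msg.help\n',
  '|Pipe prefixed lines form a\n',
  '|multi-line value.\n',
]

parsed = {}
remainder = list(sample)

while remainder:
  line = remainder.pop(0)
  comment_start = line.find('#')

  if comment_start != -1:
    line = line[:comment_start]

  line = line.strip()

  if not line:
    continue

  try:
    key, value = line.split(' ', 1)
    value = value.strip()
  except ValueError:
    key, value = line, ''

  if not value:
    multiline_buffer = []

    while remainder and remainder[0].lstrip().startswith('|'):
      multiline_buffer.append(remainder.pop(0).lstrip()[1:].rstrip('\n'))

    if multiline_buffer:
      parsed[key] = '\n'.join(multiline_buffer)
      continue

  parsed[key] = value

print(parsed)  # {'user.name': 'Galen', 'msg.help': 'Pipe prefixed lines form a\nmulti-line value.'}
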
Example #2
  def _download_descriptors(self, retries):
    try:
      use_authority = retries == 0 and self.fall_back_to_authority
      self.download_url = self._pick_url(use_authority)

      self.start_time = time.time()
      response = urllib2.urlopen(self.download_url, timeout = self.timeout).read()

      if self.download_url.endswith('.z'):
        response = zlib.decompress(response)

      self.content = response.strip()

      self.runtime = time.time() - self.start_time
      log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime))
    except:
      exc = sys.exc_info()[1]

      if retries > 0:
        log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc))
        return self._download_descriptors(retries - 1)
      else:
        log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc))
        self.error = exc
    finally:
      self.is_done = True
Example #3
def get_bsd_jail_id(pid):
  """
  Gets the jail id for a process. These seem to only exist for FreeBSD (this
  style for jails does not exist on Linux, OSX, or OpenBSD).

  :param int pid: process id of the jail id to be queried

  :returns: **int** for the jail id, zero if this can't be determined
  """

  # Output when called from a FreeBSD jail or when Tor isn't jailed:
  #   JID
  #    0
  #
  # Otherwise it's something like:
  #   JID
  #    1

  ps_output = call(GET_BSD_JAIL_ID_PS % pid)

  if ps_output and len(ps_output) == 2 and len(ps_output[1].split()) == 1:
    jid = ps_output[1].strip()

    if jid.isdigit():
      return int(jid)

  os_name = platform.system()
  if os_name == "FreeBSD":
    log.warn("Unable to get the jail id for process %s." % pid)
  else:
    log.debug("get_bsd_jail_id(%s): jail ids do not exist on %s" % (pid, os_name))

  return 0
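
A hypothetical usage sketch for the helper above; os.getpid() is used only so the snippet has a pid to query, and on anything other than a jailed FreeBSD process the result is simply zero.

import os

jid = get_bsd_jail_id(os.getpid())  # hypothetical call; zero unless jailed on FreeBSD
print('jail id: %i' % jid)
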
Example #4
 def get_str_csv(self, key, default = None, count = None, sub_key = None):
   """
   Fetches the given key as a comma separated value.
   
   :param str key: config setting to be fetched, last if multiple exist
   :param object default: value provided if no such key exists or doesn't match the count
   :param int count: if set then the default is returned when the number of elements doesn't match this value
   :param str sub_key: handle the configuration entry as a dictionary and use this key within it
   
   :returns: list with the stripped values
   """
   
   if sub_key: conf_value = self.get(key, {}).get(sub_key)
   else: conf_value = self.get_value(key)
   
   if conf_value is None: return default
   elif not conf_value.strip(): return [] # empty string
   else:
     conf_comp = [entry.strip() for entry in conf_value.split(",")]
     
     # check if the count doesn't match
     if count != None and len(conf_comp) != count:
       msg = "Config entry '%s' is expected to be %i comma separated values" % (key, count)
       if default != None and (isinstance(default, list) or isinstance(default, tuple)):
         defaultStr = ", ".join([str(i) for i in default])
         msg += ", defaulting to '%s'" % defaultStr
       
       log.debug(msg)
       return default
     
     return conf_comp
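
A small standalone sketch of the comma-separated handling above, using a made-up entry; it shows the stripping and the count fallback without needing a Config instance.

conf_value = ' NOTICE, WARN , ERR '   # hypothetical raw config value
conf_comp = [entry.strip() for entry in conf_value.split(',')]
print(conf_comp)  # ['NOTICE', 'WARN', 'ERR']

count, default = 2, ['NOTICE', 'ERR']  # hypothetical expected count and default
result = conf_comp if len(conf_comp) == count else default
print(result)  # ['NOTICE', 'ERR'] since three values were supplied but two were expected
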
Example #5
  def load(self, path = None):
    """
    Reads in the contents of the given path, adding its configuration values
    to our current contents.

    :param str path: file path to be loaded, this uses the last loaded path if
      not provided

    :raises:
      * **IOError** if we fail to read the file (it doesn't exist, insufficient
        permissions, etc)
      * **ValueError** if no path was provided and we've never been provided one
    """

    if path:
      self._path = path
    elif not self._path:
      raise ValueError("Unable to load configuration: no path provided")

    with open(self._path, "r") as config_file:
      read_contents = config_file.readlines()

    with self._contents_lock:
      self._raw_contents = read_contents
      remainder = list(self._raw_contents)

      while remainder:
        line = remainder.pop(0)

        # strips any commenting or excess whitespace
        comment_start = line.find("#")

        if comment_start != -1:
          line = line[:comment_start]

        line = line.strip()

        # parse the key/value pair
        if line:
          try:
            key, value = line.split(" ", 1)
            value = value.strip()
          except ValueError:
            log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
            key, value = line, ""

          if not value:
            # this might be a multi-line entry, try processing it as such
            multiline_buffer = []

            while remainder and remainder[0].lstrip().startswith("|"):
              content = remainder.pop(0).lstrip()[1:]  # removes '\s+|' prefix
              content = content.rstrip("\n")           # trailing newline
              multiline_buffer.append(content)

            if multiline_buffer:
              self.set(key, "\n".join(multiline_buffer), False)
              continue

          self.set(key, value, False)
Example #6
  def _download_descriptors(self, retries):
    try:
      use_authority = retries == 0 and self.fall_back_to_authority
      self.download_url = self._pick_url(use_authority)

      self.start_time = time.time()
      response = urllib2.urlopen(self.download_url, timeout = self.timeout).read()

      if self.download_url.endswith('.z'):
        response = zlib.decompress(response)

      self.content = response.strip()

      self.runtime = time.time() - self.start_time
      log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime))
    except:
      exc = sys.exc_info()[1]

      if retries > 0:
        log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc))
        return self._download_descriptors(retries - 1)
      else:
        log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc))
        self.error = exc
    finally:
      self.is_done = True
Example #7
def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg):
  """
  Attempts to expand a relative cookie path with the given pid resolver. This
  leaves the cookie_path alone if it's already absolute, **None**, or the
  system calls fail.
  """

  cookie_path = protocolinfo_response.cookie_path
  if cookie_path and not os.path.isabs(cookie_path):
    try:
      tor_pid = pid_resolver(pid_resolution_arg)

      if not tor_pid:
        raise IOError('pid lookup failed')

      tor_cwd = stem.util.system.cwd(tor_pid)

      if not tor_cwd:
        raise IOError('cwd lookup failed')

      cookie_path = stem.util.system.expand_path(cookie_path, tor_cwd)
    except IOError as exc:
      resolver_labels = {
        stem.util.system.pid_by_name: ' by name',
        stem.util.system.pid_by_port: ' by port',
        stem.util.system.pid_by_open_file: ' by socket file',
      }

      pid_resolver_label = resolver_labels.get(pid_resolver, '')
      log.debug('unable to expand relative tor cookie path%s: %s' % (pid_resolver_label, exc))

  protocolinfo_response.cookie_path = cookie_path
Example #8
    async def _download_descriptors(self, retries: int,
                                    timeout: Optional[float]) -> None:
        self.start_time = time.time()

        retries = self.retries
        time_remaining = self.timeout

        while True:
            endpoint = self._pick_endpoint(
                use_authority=retries == 0 and self.fall_back_to_authority)

            if isinstance(endpoint, stem.ORPort):
                downloaded_from = 'ORPort %s:%s (resource %s)' % (
                    endpoint.address, endpoint.port, self.resource)
            elif isinstance(endpoint, stem.DirPort):
                downloaded_from = 'http://%s:%i/%s' % (
                    endpoint.address, endpoint.port, self.resource.lstrip('/'))
                self.download_url = downloaded_from
            else:
                raise ValueError(
                    "BUG: endpoints can only be ORPorts or DirPorts, '%s' was a %s"
                    % (endpoint, type(endpoint).__name__))

            try:
                response = await asyncio.wait_for(
                    self._download_from(endpoint), time_remaining)
                self.content, self.reply_headers = _http_body_and_headers(
                    response)

                self.is_done = True
                self.runtime = time.time() - self.start_time

                log.trace('Descriptors retrieved from %s in %0.2fs' %
                          (downloaded_from, self.runtime))
                return
            except asyncio.TimeoutError as exc:
                self.is_done = True
                self.error = stem.DownloadTimeout(downloaded_from, exc,
                                                  sys.exc_info()[2],
                                                  self.timeout)
                return
            except:
                exception = sys.exc_info()[1]
                retries -= 1

                if time_remaining is not None:
                    time_remaining -= time.time() - self.start_time

                if retries > 0:
                    log.debug(
                        "Failed to download descriptors from '%s' (%i retries remaining): %s"
                        % (downloaded_from, retries, exception))
                else:
                    log.debug("Failed to download descriptors from '%s': %s" %
                              (self.download_url, exception))

                    self.is_done = True
                    self.error = exception
                    return
Example #9
    def _post_authentication(self):
        log = logging.getLogger('theonionbox')
        log.debug('BaseController._post_authentication()!')

        super(BaseController, self)._post_authentication()

        if self.callback is not None:
            self.callback()
Example #10
    def __init__(self):
        nyx.panel.Panel.__init__(self)

        self._contents = []
        self._scroller = nyx.curses.CursorScroller()
        self._sort_order = CONFIG['features.config.order']
        self._show_all = False  # show all options, or just the important ones

        cached_manual_path = os.path.join(DATA_DIR, 'manual')

        if os.path.exists(cached_manual_path):
            manual = stem.manual.Manual.from_cache(cached_manual_path)
        else:
            try:
                manual = stem.manual.Manual.from_man()

                try:
                    manual.save(cached_manual_path)
                except IOError as exc:
                    log.debug(
                        "Unable to cache manual information to '%s'. This is fine, but means starting Nyx takes a little longer than usual: %s"
                        % (cached_manual_path, exc))
            except IOError as exc:
                log.debug(
                    "Unable to use 'man tor' to get information about config options (%s), using bundled information instead"
                    % exc)
                manual = stem.manual.Manual.from_cache()

        try:
            for line in tor_controller().get_info('config/names').splitlines():
                # Lines of the form "<option> <type>[ <documentation>]". Documentation
                # was apparently only in old tor versions like 0.2.1.25.

                if ' ' not in line:
                    continue

                line_comp = line.split()
                name, value_type = line_comp[0], line_comp[1]

                # skips private and virtual entries if not configured to show them

                if name.startswith('__') and not CONFIG[
                        'features.config.state.showPrivateOptions']:
                    continue
                elif value_type == 'Virtual' and not CONFIG[
                        'features.config.state.showVirtualOptions']:
                    continue

                self._contents.append(ConfigEntry(name, value_type, manual))

            self._contents = sorted(
                self._contents,
                key=lambda entry:
                [entry.sort_value(field) for field in self._sort_order])
        except stem.ControllerError as exc:
            log.warn(
                'Unable to determine the configuration options tor supports: %s'
                % exc)
Example #11
def _log_failure(parameter, exc):
    """
  Logs a message indicating that the proc query failed.

  :param str parameter: description of the proc attribute being fetched
  :param Exception exc: exception that we're raising
  """

    log.debug('proc call failed (%s): %s' % (parameter, exc))
Example #12
def _log_failure(parameter, exc):
  """
  Logs a message indicating that the proc query failed.

  :param str parameter: description of the proc attribute being fetched
  :param Exception exc: exception that we're raising
  """

  log.debug("proc call failed (%s): %s" % (parameter, exc))
Example #13
def call(command, default = UNDEFINED, ignore_exit_status = False):
  """
  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.

  :param str,list command: command to be issued
  :param object default: response if the query fails
  :param bool ignore_exit_status: if **True**, a non-zero exit status is not
    treated as a failure

  :returns: **list** with the lines of output from the command

  :raises: **OSError** if this fails and no default was provided
  """

  if isinstance(command, str):
    command_list = command.split(' ')
  else:
    command_list = command

  try:
    is_shell_command = command_list[0] in SHELL_COMMANDS

    start_time = time.time()
    process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command)

    stdout, stderr = process.communicate()
    stdout, stderr = stdout.strip(), stderr.strip()
    runtime = time.time() - start_time

    log.debug('System call: %s (runtime: %0.2f)' % (command, runtime))
    trace_prefix = 'Received from system (%s)' % command

    if stdout and stderr:
      log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
    elif stdout:
      log.trace(trace_prefix + ', stdout:\n%s' % stdout)
    elif stderr:
      log.trace(trace_prefix + ', stderr:\n%s' % stderr)

    exit_code = process.poll()

    if not ignore_exit_status and exit_code != 0:
      raise OSError('%s returned exit status %i' % (command, exit_code))

    if stdout:
      return stdout.decode('utf-8', 'replace').splitlines()
    else:
      return []
  except OSError as exc:
    log.debug('System call (failed): %s (error: %s)' % (command, exc))

    if default != UNDEFINED:
      return default
    else:
      raise exc
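
A hedged usage sketch of the call() helper above (stem.util.system.call accepts the same command and default arguments); supplying a default swallows the OSError that a failed command would otherwise raise.

# hypothetical invocation; 'uptime' is just an example command

uptime_lines = call('uptime', default = [])

if uptime_lines:
  print('uptime reported: %s' % uptime_lines[0])
else:
  print('uptime lookup failed')
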
Example #14
def call(command, default=UNDEFINED, ignore_exit_status=False):
    """
  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.

  :param str command: command to be issued
  :param object default: response if the query fails
  :param bool ignore_exit_status: if **True**, a non-zero exit status is not
    treated as a failure

  :returns: **list** with the lines of output from the command

  :raises: **OSError** if this fails and no default was provided
  """

    try:
        is_shell_command = command.split(" ")[0] in SHELL_COMMANDS

        start_time = time.time()
        process = subprocess.Popen(command.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=is_shell_command)

        stdout, stderr = process.communicate()
        stdout, stderr = stdout.strip(), stderr.strip()
        runtime = time.time() - start_time

        log.debug("System call: %s (runtime: %0.2f)" % (command, runtime))
        trace_prefix = "Received from system (%s)" % command

        if stdout and stderr:
            log.trace(trace_prefix + ", stdout:\n%s\nstderr:\n%s" %
                      (stdout, stderr))
        elif stdout:
            log.trace(trace_prefix + ", stdout:\n%s" % stdout)
        elif stderr:
            log.trace(trace_prefix + ", stderr:\n%s" % stderr)

        exit_code = process.poll()

        if not ignore_exit_status and exit_code != 0:
            raise OSError("%s returned exit status %i" % (command, exit_code))

        if stdout:
            return stdout.decode("utf-8", "replace").splitlines()
        else:
            return []
    except OSError as exc:
        log.debug("System call (failed): %s (error: %s)" % (command, exc))

        if default != UNDEFINED:
            return default
        else:
            raise exc
Example #15
  def __init__(self, use_mirrors = False, **default_args):
    self._default_args = default_args
    self._endpoints = DIRECTORY_AUTHORITIES.values()

    if use_mirrors:
      try:
        start_time = time.time()
        self.use_directory_mirrors()
        log.debug("Retrieved directory mirrors (took %0.2fs)" % (time.time() - start_time))
      except Exception as exc:
        log.debug("Unable to retrieve directory mirrors: %s" % exc)
Example #16
def _log_runtime(parameter, proc_location, start_time):
  """
  Logs a message indicating a successful proc query.

  :param str parameter: description of the proc attribute being fetched
  :param str proc_location: proc files we were querying
  :param int start_time: unix time for when this query was started
  """

  runtime = time.time() - start_time
  log.debug("proc call (%s): %s (runtime: %0.4f)" % (parameter, proc_location, runtime))
Example #17
def _log_runtime(parameter, proc_location, start_time):
  """
  Logs a message indicating a successful proc query.

  :param str parameter: description of the proc attribute being fetched
  :param str proc_location: proc files we were querying
  :param int start_time: unix time for when this query was started
  """

  runtime = time.time() - start_time
  log.debug("proc call (%s): %s (runtime: %0.4f)" % (parameter, proc_location, runtime))
Example #18
  def __init__(self, use_mirrors = False, **default_args):
    self._default_args = default_args

    self._endpoints = None

    if use_mirrors:
      try:
        start_time = time.time()
        self.use_directory_mirrors()
        log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time))
      except Exception as exc:
        log.debug('Unable to retrieve directory mirrors: %s' % exc)
Example #19
  def __init__(self, use_mirrors = False, **default_args):
    self._default_args = default_args

    authorities = filter(HAS_V3IDENT, get_authorities().values())
    self._endpoints = [(auth.address, auth.dir_port) for auth in authorities]

    if use_mirrors:
      try:
        start_time = time.time()
        self.use_directory_mirrors()
        log.debug("Retrieved directory mirrors (took %0.2fs)" % (time.time() - start_time))
      except Exception as exc:
        log.debug("Unable to retrieve directory mirrors: %s" % exc)
Example #20
  def __init__(self, use_mirrors = False, **default_args):
    self._default_args = default_args

    authorities = filter(HAS_V3IDENT, get_authorities().values())
    self._endpoints = [(auth.address, auth.dir_port) for auth in authorities]

    if use_mirrors:
      try:
        start_time = time.time()
        self.use_directory_mirrors()
        log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time))
      except Exception as exc:
        log.debug('Unable to retrieve directory mirrors: %s' % exc)
Example #21
    def __init__(self, use_mirrors: bool = False, **default_args: Any) -> None:
        self._default_args = default_args

        self._endpoints = None  # type: Optional[List[stem.DirPort]]

        if use_mirrors:
            try:
                start_time = time.time()
                self.use_directory_mirrors()
                log.debug('Retrieved directory mirrors (took %0.2fs)' %
                          (time.time() - start_time))
            except Exception as exc:
                log.debug('Unable to retrieve directory mirrors: %s' % exc)
Example #22
  def __init__(self):
    nyx.panel.Panel.__init__(self)

    self._contents = []
    self._scroller = nyx.curses.CursorScroller()
    self._sort_order = CONFIG['features.config.order']
    self._show_all = False  # show all options, or just the important ones

    cached_manual_path = os.path.join(DATA_DIR, 'manual')

    if os.path.exists(cached_manual_path):
      manual = stem.manual.Manual.from_cache(cached_manual_path)
    else:
      try:
        manual = stem.manual.Manual.from_man()

        try:
          manual.save(cached_manual_path)
        except IOError as exc:
          log.debug("Unable to cache manual information to '%s'. This is fine, but means starting Nyx takes a little longer than usual: " % (cached_manual_path, exc))
      except IOError as exc:
        log.debug("Unable to use 'man tor' to get information about config options (%s), using bundled information instead" % exc)
        manual = stem.manual.Manual.from_cache()

    try:
      for line in tor_controller().get_info('config/names').splitlines():
        # Lines of the form "<option> <type>[ <documentation>]". Documentation
        # was apparently only in old tor versions like 0.2.1.25.

        if ' ' not in line:
          continue

        line_comp = line.split()
        name, value_type = line_comp[0], line_comp[1]

        # skips private and virtual entries if not configured to show them

        if name.startswith('__') and not CONFIG['features.config.state.showPrivateOptions']:
          continue
        elif value_type == 'Virtual' and not CONFIG['features.config.state.showVirtualOptions']:
          continue

        self._contents.append(ConfigEntry(name, value_type, manual))

      self._contents = sorted(self._contents, key = lambda entry: [entry.sort_value(field) for field in self._sort_order])
    except stem.ControllerError as exc:
      log.warn('Unable to determine the configuration options tor supports: %s' % exc)
Example #23
    def get_int_csv(self, key, default=None, count=None, min_value=None, max_value=None, sub_key=None):
        """
    Fetches the given comma separated value, returning the default if the
    values aren't integers or don't follow the given constraints.
    
    :param str key: config setting to be fetched, last if multiple exist
    :param object default: value provided if no such key exists, doesn't match
      the count, values aren't all integers, or doesn't match the bounds
    :param int count: checks that the number of values matches this if set
    :param int min_value: checks that all values are over this if set
    :param int max_value: checks that all values are under this if set
    :param str sub_key: handle the configuration entry as a dictionary and use
      this key within it
    
    :returns: **list** with the stripped values
    """

        conf_comp = self.get_str_csv(key, default, count, sub_key)
        if conf_comp == default:
            return default

        # validates the input, setting the error_msg if there's a problem
        error_msg = None
        base_error_msg = "Config entry '%s' is expected to %%s" % key

        # includes our default value in the message
        if default != None and (isinstance(default, list) or isinstance(default, tuple)):
            default_str = ", ".join([str(i) for i in default])
            base_error_msg += ", defaulting to '%s'" % default_str

        for val in conf_comp:
            if not val.isdigit():
                error_msg = base_error_msg % "only have integer values"
                break
            else:
                if min_value != None and int(val) < min_value:
                    error_msg = base_error_msg % "only have values over %i" % min_value
                    break
                elif max_value != None and int(val) > max_value:
                    error_msg = base_error_msg % "only have values less than %i" % max_value
                    break

        if error_msg:
            log.debug(error_msg)
            return default
        else:
            return [int(val) for val in conf_comp]
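
A standalone sketch of the integer validation performed above, with made-up values and bounds; get_int_csv() applies the same digit and range checks to each comma separated piece before converting.

conf_comp = ['80', '443', '9050']  # hypothetical stripped CSV values
min_value, max_value = 1, 65535    # hypothetical bounds

if all(val.isdigit() and min_value <= int(val) <= max_value for val in conf_comp):
  print([int(val) for val in conf_comp])  # [80, 443, 9050]
else:
  print('falling back to the default')
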
Example #24
  def _reset_subwindow(self):
    """
    Create a new subwindow instance for the panel if:
    - Panel currently doesn't have a subwindow (was uninitialized or
      invalidated).
    - There's room for the panel to grow vertically (curses automatically
      lets subwindows regrow horizontally, but not vertically).
    - The subwindow has been displaced. This is a curses display bug that
      manifests if the terminal shrank then re-expanded. Displaced
      subwindows are never restored to their proper position, resulting in
      graphical glitches if we draw to them.
    - The preferred size is smaller than the actual size (should shrink).

    This returns True if a new subwindow instance was created, False otherwise.
    """

    new_height, new_width = self.get_preferred_size()

    if new_height == 0:
      return False  # subwindow would be outside its parent

    # determines if a new subwindow should be recreated

    recreate = self.win is None

    if self.win:
      subwin_max_y, subwin_max_x = self.win.getmaxyx()
      recreate |= subwin_max_y < new_height           # check for vertical growth
      recreate |= self.top > self.win.getparyx()[0]   # check for displacement
      recreate |= subwin_max_x > new_width or subwin_max_y > new_height  # shrinking

    # I'm not sure if recreating subwindows is some sort of memory leak but the
    # Python curses bindings seem to lack all of the following:
    # - subwindow deletion (to tell curses to free the memory)
    # - subwindow moving/resizing (to restore the displaced windows)
    # so this is the only option (besides removing subwindows entirely which
    # would mean far more complicated code and no more selective refreshing)

    if recreate:
      self.win = self.parent.subwin(new_height, new_width, self.top, self.left)

      # note: doing this log before setting win produces an infinite loop
      log.debug("recreating panel '%s' with the dimensions of %i/%i" % (self.get_name(), new_height, new_width))

    return recreate
Example #25
    def _resetSubwindow(self):
        """
    Create a new subwindow instance for the panel if:
    - Panel currently doesn't have a subwindow (was uninitialized or
      invalidated).
    - There's room for the panel to grow vertically (curses automatically
      lets subwindows regrow horizontally, but not vertically).
    - The subwindow has been displaced. This is a curses display bug that
      manifests if the terminal shrank then re-expanded. Displaced
      subwindows are never restored to their proper position, resulting in
      graphical glitches if we draw to them.
    - The preferred size is smaller than the actual size (should shrink).
    
    This returns True if a new subwindow instance was created, False otherwise.
    """

        newHeight, newWidth = self.getPreferredSize()
        if newHeight == 0:
            return False  # subwindow would be outside its parent

        # determines if a new subwindow should be recreated
        recreate = self.win == None
        if self.win:
            subwinMaxY, subwinMaxX = self.win.getmaxyx()
            recreate |= subwinMaxY < newHeight  # check for vertical growth
            recreate |= self.top > self.win.getparyx()[
                0]  # check for displacement
            recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight  # shrinking

        # I'm not sure if recreating subwindows is some sort of memory leak but the
        # Python curses bindings seem to lack all of the following:
        # - subwindow deletion (to tell curses to free the memory)
        # - subwindow moving/resizing (to restore the displaced windows)
        # so this is the only option (besides removing subwindows entirely which
        # would mean far more complicated code and no more selective refreshing)

        if recreate:
            self.win = self.parent.subwin(newHeight, newWidth, self.top,
                                          self.left)

            # note: doing this log before setting win produces an infinite loop
            log.debug("recreating panel '%s' with the dimensions of %i/%i" %
                      (self.getName(), newHeight, newWidth))
        return recreate
Example #26
def call(command, default = UNDEFINED):
  """
  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.

  :param str command: command to be issued
  :param object default: response if the query fails

  :returns: **list** with the lines of output from the command

  :raises: **OSError** if this fails and no default was provided
  """

  try:
    is_shell_command = command.split(" ")[0] in SHELL_COMMANDS

    start_time = time.time()
    stdout, stderr = subprocess.Popen(command.split(), stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command).communicate()
    stdout, stderr = stdout.strip(), stderr.strip()
    runtime = time.time() - start_time

    log.debug("System call: %s (runtime: %0.2f)" % (command, runtime))
    trace_prefix = "Received from system (%s)" % command

    if stdout and stderr:
      log.trace(trace_prefix + ", stdout:\n%s\nstderr:\n%s" % (stdout, stderr))
    elif stdout:
      log.trace(trace_prefix + ", stdout:\n%s" % stdout)
    elif stderr:
      log.trace(trace_prefix + ", stderr:\n%s" % stderr)

    if stdout:
      return stdout.decode("utf-8", "replace").splitlines()
    else:
      return []
  except OSError as exc:
    log.debug("System call (failed): %s (error: %s)" % (command, exc))

    if default != UNDEFINED:
      return default
    else:
      raise exc
Example #27
def download(url: str,
             timeout: Optional[float] = None,
             retries: Optional[int] = None) -> bytes:
    """
  Download from the given url.

  .. versionadded:: 1.8.0

  :param url: uncompressed url to download from
  :param timeout: timeout when connection becomes idle, no timeout
    applied if **None**
  :param retries: maximum attempts to impose

  :returns: **bytes** content of the given url

  :raises:
    * :class:`~stem.DownloadTimeout` if our request timed out
    * :class:`~stem.DownloadFailed` if our request fails
  """

    if retries is None:
        retries = 0

    start_time = time.time()

    try:
        return urllib.request.urlopen(url, timeout=timeout).read()
    except socket.timeout as exc:
        raise stem.DownloadTimeout(url, exc, sys.exc_info()[2], timeout)
    except:
        exception, stacktrace = sys.exc_info()[1:3]

        if timeout is not None:
            timeout -= time.time() - start_time

        if retries > 0 and (timeout is None or timeout > 0):
            log.debug('Failed to download from %s (%i retries remaining): %s' %
                      (url, retries, exception))
            return download(url, timeout, retries - 1)
        else:
            log.debug('Failed to download from %s: %s' % (url, exception))
            raise stem.DownloadFailed(url, exception, stacktrace)
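
A hedged usage sketch for download() above; the URL is only illustrative, and failures surface as the stem.DownloadTimeout or stem.DownloadFailed exceptions the function raises.

import stem

try:
    # hypothetical target; any reachable http or https url works
    content = download('https://collector.torproject.org/index/index.json', timeout=10, retries=2)
    print('fetched %i bytes' % len(content))
except (stem.DownloadTimeout, stem.DownloadFailed) as exc:
    print('download failed: %s' % exc)
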
Example #28
def call(command, suppress_exc = True):
  """
  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.
  
  :param str command: command to be issued
  :param bool suppress_exc: if **True** then **None** is returned on failure,
    otherwise this raises the exception
  
  :returns: **list** with the lines of output from the command, **None** in
    case of failure if suppress_exc is **True**
  
  :raises: **OSError** if this fails and suppress_exc is **False**
  """
  
  try:
    start_time = time.time()
    stdout, stderr = subprocess.Popen(command.split(), stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()
    stdout, stderr = stdout.strip(), stderr.strip()
    runtime = time.time() - start_time
    
    log.debug("System call: %s (runtime: %0.2f)" % (command, runtime))
    trace_prefix = "Received from system (%s)" % command
    
    if stdout and stderr:
      log.trace(trace_prefix + ", stdout:\n%s\nstderr:\n%s" % (stdout, stderr))
    elif stdout:
      log.trace(trace_prefix + ", stdout:\n%s" % stdout)
    elif stderr:
      log.trace(trace_prefix + ", stderr:\n%s" % stderr)
    
    if stdout: return stdout.splitlines()
    else: return []
  except OSError as exc:
    log.debug("System call (failed): %s (error: %s)" % (command, exc))
    
    if suppress_exc: return None
    else: raise exc
Example #29
 def set_options(self, params, reset = False):
   """
   Changes multiple tor configuration options via either a SETCONF or
   RESETCONF query. Both behave identically unless our value is None, in which
   case SETCONF sets the value to 0 or NULL, and RESETCONF returns it to its
   default value. This accepts str, list, or None values in a similar fashion
   to :func:`~stem.control.Controller.set_conf`. For example...
   
   ::
   
     my_controller.set_options({
       "Nickname": "caerSidi",
       "ExitPolicy": ["accept *:80", "accept *:443", "reject *:*"],
       "ContactInfo": "*****@*****.**",
       "Log": None,
     })
   
   The params can optionally be a list of key/value tuples, though the only
   reason this type of argument would be useful is for hidden service
   configuration (those options are order dependent).
   
   :param dict,list params: mapping of configuration options to the values
     we're setting it to
   :param bool reset: issues a RESETCONF, returning **None** values to their
     defaults if **True**
   
   :raises:
     * :class:`stem.socket.ControllerError` if the call fails
     * :class:`stem.socket.InvalidArguments` if configuration options
       requested was invalid
     * :class:`stem.socket.InvalidRequest` if the configuration setting is
       impossible or if there's a syntax error in the configuration values
   """
   
   start_time = time.time()
   
   # constructs the SETCONF or RESETCONF query
   query_comp = ["RESETCONF" if reset else "SETCONF"]
   
   if isinstance(params, dict):
     params = params.items()
   
   for param, value in params:
     if isinstance(value, str):
       query_comp.append("%s=\"%s\"" % (param, value.strip()))
     elif value:
       query_comp.extend(["%s=\"%s\"" % (param, val.strip()) for val in value])
     else:
       query_comp.append(param)
   
   query = " ".join(query_comp)
   response = self.msg(query)
   stem.response.convert("SINGLELINE", response)
   
   if response.is_ok():
     log.debug("%s (runtime: %0.4f)" % (query, time.time() - start_time))
     
     if self.is_caching_enabled():
       for param, value in params:
         cache_key = "getconf.%s" % param.lower()
         
         if value is None:
           if cache_key in self._request_cache:
             del self._request_cache[cache_key]
         elif isinstance(value, str):
           self._request_cache[cache_key] = [value]
         else:
           self._request_cache[cache_key] = value
   else:
     log.debug("%s (failed, code: %s, message: %s)" % (query, response.code, response.message))
     
     if response.code == "552":
       if response.message.startswith("Unrecognized option: Unknown option '"):
         key = response.message[37:response.message.find("\'", 37)]
         raise stem.socket.InvalidArguments(response.code, response.message, [key])
       raise stem.socket.InvalidRequest(response.code, response.message)
     elif response.code in ("513", "553"):
       raise stem.socket.InvalidRequest(response.code, response.message)
     else:
       raise stem.socket.ProtocolError("Returned unexpected status code: %s" % response.code)
Example #30
def get_cwd(pid):
  """
  Provides the working directory of the given process.

  :param int pid: process id of the process to be queried
  :returns: **str** with the absolute path for the process' present working
    directory, **None** if it can't be determined
  """

  # try fetching via the proc contents if it's available
  if stem.util.proc.is_available():
    try:
      return stem.util.proc.get_cwd(pid)
    except IOError:
      pass

  # Fall back to a pwdx query. This isn't available on BSD.
  logging_prefix = "get_cwd(%s):" % pid

  if is_available("pwdx"):
    # pwdx results are of the form:
    # 3799: /home/atagar
    # 5839: No such process

    results = call(GET_CWD_PWDX % pid)

    if not results:
      log.debug("%s pwdx didn't return any results" % logging_prefix)
    elif results[0].endswith("No such process"):
      log.debug("%s pwdx processes reported for this pid" % logging_prefix)
    elif len(results) != 1 or results[0].count(" ") != 1 or not results[0].startswith("%s: " % pid):
      log.debug("%s we got unexpected output from pwdx: %s" % (logging_prefix, results))
    else:
      return results[0].split(" ", 1)[1].strip()

  # Use lsof as the final fallback. This is available on both Linux and BSD,
  # and it's the only lookup method here that works for BSD...
  # https://trac.torproject.org/projects/tor/ticket/4236
  #
  # flags:
  #   a - presents the intersection of the following arguments
  #   p - limits results to this pid
  #   d cwd - limits results to just the cwd rather than all open files
  #   Fn - short listing in a single column, with just the pid and cwd
  #
  # example output:
  #   ~$ lsof -a -p 75717 -d cwd -Fn
  #   p75717
  #   n/Users/atagar/tor/src/or

  if is_available("lsof"):
    results = call(GET_CWD_LSOF % pid)

    if results and len(results) == 2 and results[1].startswith("n/"):
      lsof_result = results[1][1:].strip()

      # If we lack read permissions for the cwd then it returns...
      # p2683
      # n/proc/2683/cwd (readlink: Permission denied)

      if not " " in lsof_result:
        return lsof_result
    else:
      log.debug("%s we got unexpected output from lsof: %s" % (logging_prefix, results))

  return None  # all queries failed
Example #31
def get_pid_by_name(process_name):
  """
  Attempts to determine the process id for a running process, using...

  ::

    1. pgrep -x <name>
    2. pidof <name>
    3. ps -o pid -C <name> (linux)
       ps axc | egrep " <name>$" (bsd)
    4. lsof -tc <name>

  Results with multiple instances of the process are discarded.

  :param str process_name: process name for which to fetch the pid

  :returns: **int** with the process id, **None** if it can't be determined
  """

  # attempts to resolve using pgrep, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pgrep -x vim
  #   3283
  #   3392

  if is_available("pgrep"):
    results = call(GET_PID_BY_NAME_PGREP % process_name)

    if results and len(results) == 1:
      pid = results[0].strip()

      if pid.isdigit():
        return int(pid)

  # attempts to resolve using pidof, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pidof vim
  #   3392 3283

  if is_available("pidof"):
    results = call(GET_PID_BY_NAME_PIDOF % process_name)

    if results and len(results) == 1 and len(results[0].split()) == 1:
      pid = results[0].strip()

      if pid.isdigit():
        return int(pid)

  # attempts to resolve using ps, failing if:
  # - system's ps variant doesn't handle these flags (none known at the moment)
  #
  # example output:
  #   atagar@morrigan:~/Desktop/stem$ ps -o pid -C vim
  #     PID
  #    3283
  #    3392
  #
  #   atagar$ ps axc
  #     PID   TT  STAT      TIME COMMAND
  #       1   ??  Ss     9:00.22 launchd
  #      10   ??  Ss     0:09.97 kextd
  #      11   ??  Ss     5:47.36 DirectoryService
  #      12   ??  Ss     3:01.44 notifyd

  if is_available("ps"):
    if not is_bsd():
      # linux variant of ps
      results = call(GET_PID_BY_NAME_PS_LINUX % process_name)

      if results and len(results) == 2:
        pid = results[1].strip()

        if pid.isdigit():
          return int(pid)

    if is_bsd():
      # bsd variant of ps
      results = call(GET_PID_BY_NAME_PS_BSD)

      if results:
        # filters results to those with our process name
        results = [r for r in results if r.endswith(" %s" % process_name)]

        if len(results) == 1 and len(results[0].split()) > 0:
          pid = results[0].split()[0]

          if pid.isdigit():
            return int(pid)

  # resolves using lsof which works on both Linux and BSD, only failing if:
  # - lsof is unavailable (not included by default on OpenBSD)
  # - the process being run as a different user due to permissions
  # - the process doesn't have any open files to be reported by lsof?
  #
  # flags:
  #   t - only show pids
  #   c - restrict results to that command
  #
  # example output:
  #   atagar@morrigan:~$ lsof -t -c vim
  #   2470
  #   2561

  if is_available("lsof"):
    results = call(GET_PID_BY_NAME_LSOF % process_name)

    if results and len(results) == 1:
      pid = results[0].strip()

      if pid.isdigit():
        return int(pid)

  log.debug("failed to resolve a pid for '%s'" % process_name)
  return None
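
A hypothetical usage sketch for the resolver above; 'tor' is just an example process name.

tor_pid = get_pid_by_name('tor')

if tor_pid:
  print('tor is running with pid %i' % tor_pid)
else:
  print('unable to determine a pid for tor')
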
Example #32
 def get_info(self, params, default = UNDEFINED):
   """
   Queries the control socket for the given GETINFO option. If provided a
   default then that's returned if the GETINFO option is undefined or the
   call fails for any reason (error response, control port closed, initiated,
   etc).
   
   :param str,list params: GETINFO option or options to be queried
   :param object default: response if the query fails
   
   :returns:
     Response depends upon how we were called as follows...
     
     * **str** with the response if our param was a **str**
     * **dict** with the 'param => response' mapping if our param was a **list**
     * default if one was provided and our call failed
   
   :raises:
     * :class:`stem.socket.ControllerError` if the call fails and we weren't
       provided a default response
     * :class:`stem.socket.InvalidArguments` if the 'param' requested was
       invalid
   """
   
   start_time = time.time()
   reply = {}
   
   if isinstance(params, str):
     is_multiple = False
     params = set([params])
   else:
     if not params: return {}
     is_multiple = True
     params = set(params)
   
   # check for cached results
   for param in list(params):
     cache_key = "getinfo.%s" % param.lower()
     
     if cache_key in self._request_cache:
       reply[param] = self._request_cache[cache_key]
       params.remove(param)
     elif param.startswith('ip-to-country/') and self.is_geoip_unavailable():
       # the geoip database already looks to be unavailable - abort the request
       raise stem.socket.ProtocolError("Tor geoip database is unavailable")
   
   # if everything was cached then short circuit making the query
   if not params:
     log.debug("GETINFO %s (cache fetch)" % " ".join(reply.keys()))
     
     if is_multiple: return reply
     else: return reply.values()[0]
   
   try:
     response = self.msg("GETINFO %s" % " ".join(params))
     stem.response.convert("GETINFO", response)
     response.assert_matches(params)
     reply.update(response.entries)
     
     if self.is_caching_enabled():
       for key, value in response.entries.items():
         key = key.lower() # make case insensitive
         
         if key in CACHEABLE_GETINFO_PARAMS:
           self._request_cache["getinfo.%s" % key] = value
         elif key.startswith('ip-to-country/'):
           # both cache-able and means that we should reset the geoip failure count
           self._request_cache["getinfo.%s" % key] = value
           self._geoip_failure_count = -1
     
     log.debug("GETINFO %s (runtime: %0.4f)" % (" ".join(params), time.time() - start_time))
     
     if is_multiple:
       return reply
     else:
       return reply.values()[0]
   except stem.socket.ControllerError as exc:
     # bump geoip failure count if...
     # * we're caching results
     # * this was solely a geoip lookup
     # * we've never had a successful geoip lookup (failure count isn't -1)
     
     is_geoip_request = len(params) == 1 and list(params)[0].startswith('ip-to-country/')
     
     if is_geoip_request and self.is_caching_enabled() and self._geoip_failure_count != -1:
       self._geoip_failure_count += 1
     
     log.debug("GETINFO %s (failed: %s)" % (" ".join(params), exc))
     
     if default == UNDEFINED: raise exc
     else: return default
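
A hedged usage sketch for get_info() above, assuming an already authenticated controller object named controller (not part of the snippet); a single string yields a string while a list yields a dict.

# hypothetical controller usage

version = controller.get_info('version', default = None)
print('tor version: %s' % version)

info = controller.get_info(['traffic/read', 'traffic/written'], default = {})
print('bytes read/written: %s / %s' % (info.get('traffic/read'), info.get('traffic/written')))
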
Example #33
 def get_conf_map(self, params, default = UNDEFINED, multiple = True):
   """
   Queries the control socket for the values of given configuration options
   and provides a mapping of the keys to the values. If provided a default
   then that's returned if the GETCONF option is undefined or if the call
   fails for any reason (invalid configuration option, error response, control
   port closed, initiated, etc). Configuration keys that are empty or contain
   only whitespace are ignored.
   
   There's three use cases for GETCONF:
   
     1. a single value is provided
     2. multiple values are provided for the option queried
     3. a set of options that weren't necessarily requested are returned (for
        instance querying HiddenServiceOptions gives HiddenServiceDir,
        HiddenServicePort, etc)
   
   The vast majority of the options fall into the first two categories, in
   which case calling :func:`~stem.control.Controller.get_conf` is sufficient.
   However, for batch queries or the special options that give a set of values
   this provides back the full response. As of tor version 0.2.1.25
   HiddenServiceOptions was the only option like this.
   
   The :func:`~stem.control.Controller.get_conf` and
   :func:`~stem.control.Controller.get_conf_map` functions both try to account
   for these special mappings, so queries like get_conf("HiddenServicePort")
   should behave as you'd expect. This method, however, simply returns
   whatever Tor provides so get_conf_map("HiddenServicePort") will give the
   same response as get_conf_map("HiddenServiceOptions").
   
   :param str,list params: configuration option(s) to be queried
   :param object default: response if the query fails
   :param bool multiple: if **True**, the value(s) provided are lists of all
     returned values, otherwise this just provides the first value
   
   :returns:
     Response depends upon how we were called as follows...
     
     * **dict** of 'config key => value' mappings, the value is a list if
       'multiple' is **True** and a **str** of just the first value otherwise
     * default if one was provided and our call failed
   
   :raises:
     * :class:`stem.socket.ControllerError` if the call fails and we weren't provided a default response
     * :class:`stem.socket.InvalidArguments` if the configuration option requested was invalid
   """
   
   start_time = time.time()
   reply = {}
   
   if isinstance(params, str):
     params = [params]
   
   # remove strings which contain only whitespace
   params = filter(lambda entry: entry.strip(), params)
   if params == []: return {}
   
   # translate context sensitive options
   lookup_params = set([MAPPED_CONFIG_KEYS.get(entry, entry) for entry in params])
   
   # check for cached results
   for param in list(lookup_params):
     cache_key = "getconf.%s" % param.lower()
     
     if cache_key in self._request_cache:
       reply[param] = self._request_cache[cache_key]
       lookup_params.remove(param)
   
   # if everything was cached then short circuit making the query
   if not lookup_params:
     log.debug("GETCONF %s (cache fetch)" % " ".join(reply.keys()))
     
     if multiple:
       return reply
     else:
       return dict([(entry[0], entry[1][0]) for entry in reply.items()])
   
   try:
     response = self.msg("GETCONF %s" % ' '.join(lookup_params))
     stem.response.convert("GETCONF", response)
     reply.update(response.entries)
     
     if self.is_caching_enabled():
       for key, value in response.entries.items():
         self._request_cache["getconf.%s" % key.lower()] = value
     
     # Maps the entries back to the parameters that the user requested so the
     # capitalization matches (ie, if they request "exitpolicy" then that
     # should be the key rather than "ExitPolicy"). When the same
     # configuration key is provided multiple times this determines the case
     # based on the first and ignores the rest.
     #
     # This retains the tor provided camel casing of MAPPED_CONFIG_KEYS
     # entries since the user didn't request those by their key, so we can't
     # be sure what they wanted.
     
     for key in reply:
       if not key.lower() in MAPPED_CONFIG_KEYS.values():
         user_expected_key = _case_insensitive_lookup(params, key, key)
         
         if key != user_expected_key:
           reply[user_expected_key] = reply[key]
           del reply[key]
     
     log.debug("GETCONF %s (runtime: %0.4f)" % (" ".join(lookup_params), time.time() - start_time))
     
     if multiple:
       return reply
     else:
       return dict([(entry[0], entry[1][0]) for entry in reply.items()])
   except stem.socket.ControllerError as exc:
     log.debug("GETCONF %s (failed: %s)" % (" ".join(lookup_params), exc))
     
     if default != UNDEFINED: return default
     else: raise exc
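
A hedged usage sketch for get_conf_map() above, again assuming an authenticated controller object named controller; ExitPolicy is one of the options that commonly maps to multiple values.

# hypothetical controller usage

exit_policy = controller.get_conf_map('ExitPolicy', default = {}).get('ExitPolicy', [])

for rule in exit_policy:
  print(rule)
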
Example #34
  # remove authentication methods that are either unknown or for which we don't
  # have an input
  if AuthMethod.UNKNOWN in auth_methods:
    auth_methods.remove(AuthMethod.UNKNOWN)

    unknown_methods = protocolinfo_response.unknown_auth_methods
    plural_label = "s" if len(unknown_methods) > 1 else ""
    methods_label = ", ".join(unknown_methods)

    # we... er, can't do anything with only unrecognized auth types
    if not auth_methods:
      exc_msg = "unrecognized authentication method%s (%s)" % (plural_label, methods_label)
      auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
    else:
      log.debug("Authenticating to a socket with unrecognized auth method%s, ignoring them: %s" % (plural_label, methods_label))

  if protocolinfo_response.cookie_path is None:
    for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
      if cookie_auth_method in auth_methods:
        auth_methods.remove(cookie_auth_method)

        exc_msg = "our PROTOCOLINFO response did not have the location of our authentication cookie"
        auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))

  if AuthMethod.PASSWORD in auth_methods and password is None:
    auth_methods.remove(AuthMethod.PASSWORD)
    auth_exceptions.append(MissingPassword("no passphrase provided"))

  # iterating over AuthMethods so we can try them in this order
  for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
Example #35
 def _log(msg):
   if LOG_CONNECTION_RESOLUTION:
     log.debug(msg)
Example #36
def pid_by_name(process_name, multiple = False):
  """
  Attempts to determine the process id for a running process, using...

  ::

    1. pgrep -x <name>
    2. pidof <name>
    3. ps -o pid -C <name> (linux)
       ps axc | egrep " <name>$" (bsd)
    4. lsof -tc <name>
    5. tasklist | str <name>.exe

  :param str process_name: process name for which to fetch the pid
  :param bool multiple: provides a list of all pids if **True**, otherwise
    results with multiple processes are discarded

  :returns:
    Response depends upon the 'multiple' argument as follows...

    * if **False** then this provides an **int** with the process id or **None** if it can't be determined
    * if **True** then this provides a **list** of all **int** process ids, and an empty list if it can't be determined
  """

  # attempts to resolve using pgrep, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pgrep -x vim
  #   3283
  #   3392

  if is_available('pgrep'):
    results = call(GET_PID_BY_NAME_PGREP % process_name, None)

    if results:
      try:
        pids = list(map(int, results))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  # attempts to resolve using pidof, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pidof vim
  #   3392 3283

  if is_available('pidof'):
    results = call(GET_PID_BY_NAME_PIDOF % process_name, None)

    if results and len(results) == 1:
      try:
        pids = list(map(int, results[0].split()))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  # attempts to resolve using ps, failing if:
  # - system's ps variant doesn't handle these flags (none known at the moment)
  #
  # example output:
  #   atagar@morrigan:~/Desktop/stem$ ps -o pid -C vim
  #     PID
  #    3283
  #    3392
  #
  #   atagar$ ps axc
  #     PID   TT  STAT      TIME COMMAND
  #       1   ??  Ss     9:00.22 launchd
  #      10   ??  Ss     0:09.97 kextd
  #      11   ??  Ss     5:47.36 DirectoryService
  #      12   ??  Ss     3:01.44 notifyd

  if is_available('ps'):
    if not is_bsd():
      # linux variant of ps
      results = call(GET_PID_BY_NAME_PS_LINUX % process_name, None)

      if results:
        try:
          pids = list(map(int, results[1:]))

          if multiple:
            return pids
          elif len(pids) == 1:
            return pids[0]
        except ValueError:
          pass

    if is_bsd():
      # bsd variant of ps
      results = call(GET_PID_BY_NAME_PS_BSD, None)

      if results:
        # filters results to those with our process name
        results = [r.split()[0] for r in results if r.endswith(' %s' % process_name)]

        try:
          pids = list(map(int, results))

          if multiple:
            return pids
          elif len(pids) == 1:
            return pids[0]
        except ValueError:
          pass

  # resolves using lsof which works on both Linux and BSD, only failing if:
  # - lsof is unavailable (not included by default on OpenBSD)
  # - the process being run as a different user due to permissions
  # - the process doesn't have any open files to be reported by lsof?
  #
  # flags:
  #   t - only show pids
  #   c - restrict results to that command
  #
  # example output:
  #   atagar@morrigan:~$ lsof -t -c vim
  #   2470
  #   2561

  if is_available('lsof'):
    results = call(GET_PID_BY_NAME_LSOF % process_name, None)

    if results:
      try:
        pids = list(map(int, results))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  if is_available('tasklist') and is_windows():
    if not process_name.endswith('.exe'):
      process_name = process_name + '.exe'

    process_ids = []

    results = stem.util.system.call('tasklist', None)

    if results:
      tasklist_regex = re.compile(r'^\s*%s\s+(?P<pid>[0-9]*)' % process_name)

      for line in results:
        match = tasklist_regex.search(line)

        if match:
          process_ids.append(int(match.group('pid')))

      if multiple:
        return process_ids
      elif len(process_ids) > 0:
        return process_ids[0]

  log.debug("failed to resolve a pid for '%s'" % process_name)
  return [] if multiple else None
Example #37
0
def saveConf(destination=None, contents=None):
    """
  Saves the configuration to the given path. If this is equivalent to
  issuing a SAVECONF (the contents and destination match what tor's using)
  then that's done. Otherwise, this writes the contents directly. This raises
  an IOError if unsuccessful.
  
  Arguments:
    destination - path to be saved to, the current config location if None
    contents    - configuration to be saved, the current config if None
  """

    if destination:
        destination = os.path.abspath(destination)

    # fills default config values, and sets isSaveconf to false if they differ
    # from the arguments
    isSaveconf, startTime = True, time.time()

    currentConfig = getCustomOptions(True)
    if not contents: contents = currentConfig
    else: isSaveconf &= contents == currentConfig

    # The "GETINFO config-text" option was introduced in Tor version 0.2.2.7. If
    # we're writing custom contents then this is fine, but if we're trying to
    # save the current configuration then we need to fail if it's unavailable.
    # Otherwise we'd write a blank torrc as per...
    # https://trac.torproject.org/projects/tor/ticket/3614

    if contents == ['']:
        # double check that "GETINFO config-text" is unavailable rather than just
        # giving an empty result

        if torTools.getConn().getInfo("config-text", None) is None:
            raise IOError("determining the torrc requires Tor version 0.2.2.7")

    currentLocation = None
    try:
        currentLocation = getConfigLocation()
        if not destination: destination = currentLocation
        else: isSaveconf &= destination == currentLocation
    except IOError:
        pass

    if not destination: raise IOError("unable to determine the torrc's path")
    logMsg = "Saved config by %%s to %s (runtime: %%0.4f)" % destination

    # attempts SAVECONF if we're updating our torrc with the current state
    if isSaveconf:
        try:
            torTools.getConn().saveConf()

            try:
                getTorrc().load()
            except IOError:
                pass

            log.debug(logMsg % ("SAVECONF", time.time() - startTime))
            return  # if successful then we're done
        except:
            pass

    # if the SAVECONF fails or this is a custom save then write contents directly
    try:
        # make dir if the path doesn't already exist
        baseDir = os.path.dirname(destination)
        if not os.path.exists(baseDir): os.makedirs(baseDir)

        # saves the configuration to the file
        configFile = open(destination, "w")
        configFile.write("\n".join(contents))
        configFile.close()
    except (IOError, OSError) as exc:
        raise IOError(exc)
Example #38
0
        # saves the configuration to the file
        configFile = open(destination, "w")
        configFile.write("\n".join(contents))
        configFile.close()
    except (IOError, OSError) as exc:
        raise IOError(exc)

    # reloads the cached torrc if overwriting it
    if destination == currentLocation:
        try:
            getTorrc().load()
        except IOError:
            pass

    log.debug(logMsg % ("directly writing", time.time() - startTime))


def validate(contents=None):
    """
  Performs validation on the given torrc contents, providing back a listing of
  (line number, issue, msg) tuples for issues found. If the issue occurs on a
  multiline torrc entry then the line number is for the last line of the entry.
  
  Arguments:
    contents - torrc contents
  """

    conn = torTools.getConn()
    customOptions = getCustomOptions()
    issuesFound, seenOptions = [], []
Example #39
0
    def load(self, path=None):
        """
    Reads in the contents of the given path, adding its configuration values
    to our current contents. If the path is a directory then this loads each
    of the files, recursively.

    .. versionchanged:: 1.3.0
       Added support for directories.

    :param str path: file or directory path to be loaded, this uses the last
      loaded path if not provided

    :raises:
      * **IOError** if we fail to read the file (it doesn't exist, insufficient
        permissions, etc)
      * **ValueError** if no path was provided and we've never been provided one
    """

        if path:
            self._path = path
        elif not self._path:
            raise ValueError('Unable to load configuration: no path provided')

        if os.path.isdir(self._path):
            for root, dirnames, filenames in os.walk(self._path):
                for filename in filenames:
                    self.load(os.path.join(root, filename))

            return

        with open(self._path, 'r') as config_file:
            read_contents = config_file.readlines()

        with self._contents_lock:
            while read_contents:
                line = read_contents.pop(0)

                # strips any commenting or excess whitespace
                comment_start = line.find('#')

                if comment_start != -1:
                    line = line[:comment_start]

                line = line.strip()

                # parse the key/value pair
                if line:
                    try:
                        key, value = line.split(' ', 1)
                        value = value.strip()
                    except ValueError:
                        log.debug(
                            "Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''"
                            % (line, line))
                        key, value = line, ''

                    if not value:
                        # this might be a multi-line entry, try processing it as such
                        multiline_buffer = []

                        while read_contents and read_contents[0].lstrip().startswith('|'):
                            content = read_contents.pop(0).lstrip()[1:]  # removes '\s+|' prefix
                            content = content.rstrip('\n')  # trailing newline
                            multiline_buffer.append(content)

                        if multiline_buffer:
                            self.set(key, '\n'.join(multiline_buffer), False)
                            continue

                    self.set(key, value, False)

    def get(self, key, default=None):
        """
    Fetches the given configuration, using the key and default value to
    determine the type it should be. Recognized inferences are:

    * **default is a boolean => boolean**

      * values are case insensitive
      * provides the default if the value isn't "true" or "false"

    * **default is an integer => int**

      * provides the default if the value can't be converted to an int

    * **default is a float => float**

      * provides the default if the value can't be converted to a float

    * **default is a list => list**

      * string contents for all configuration values with this key

    * **default is a tuple => tuple**

      * string contents for all configuration values with this key

    * **default is a dictionary => dict**

      * values without "=>" in them are ignored
      * values are split into key/value pairs on "=>" with extra whitespace
        stripped

    :param str key: config setting to be fetched
    :param object default: value provided if no such key exists or fails to be converted

    :returns: given configuration value with its type inferred with the above rules
    """

        is_multivalue = isinstance(default, (list, tuple, dict))
        val = self.get_value(key, default, is_multivalue)

        if val == default:
            return val  # don't try to infer undefined values

        if isinstance(default, bool):
            if val.lower() == 'true':
                val = True
            elif val.lower() == 'false':
                val = False
            else:
                log.debug(
                    "Config entry '%s' is expected to be a boolean, defaulting to '%s'"
                    % (key, str(default)))
                val = default
        elif isinstance(default, int):
            try:
                val = int(val)
            except ValueError:
                log.debug(
                    "Config entry '%s' is expected to be an integer, defaulting to '%i'"
                    % (key, default))
                val = default
        elif isinstance(default, float):
            try:
                val = float(val)
            except ValueError:
                log.debug(
                    "Config entry '%s' is expected to be a float, defaulting to '%f'"
                    % (key, default))
                val = default
        elif isinstance(default, list):
            val = list(val)  # make a shallow copy
        elif isinstance(default, tuple):
            val = tuple(val)
        elif isinstance(default, dict):
            val_map = OrderedDict()
            for entry in val:
                if '=>' in entry:
                    entry_key, entry_val = entry.split('=>', 1)
                    val_map[entry_key.strip()] = entry_val.strip()
                else:
                    log.debug(
                        'Ignoring invalid %s config entry (expected a mapping, but "%s" was missing "=>")'
                        % (key, entry))
            val = val_map

        return val
Example #41
0
def saveConf(destination = None, contents = None):
  """
  Saves the configuration to the given path. If this is equivalent to
  issuing a SAVECONF (the contents and destination match what tor's using)
  then that's done. Otherwise, this writes the contents directly. This raises
  an IOError if unsuccessful.
  
  Arguments:
    destination - path to be saved to, the current config location if None
    contents    - configuration to be saved, the current config if None
  """
  
  if destination:
    destination = os.path.abspath(destination)
  
  # fills default config values, and sets isSaveconf to false if they differ
  # from the arguments
  isSaveconf, startTime = True, time.time()
  
  currentConfig = getCustomOptions(True)
  if not contents: contents = currentConfig
  else: isSaveconf &= contents == currentConfig
  
  # The "GETINFO config-text" option was introduced in Tor version 0.2.2.7. If
  # we're writing custom contents then this is fine, but if we're trying to
  # save the current configuration then we need to fail if it's unavailable.
  # Otherwise we'd write a blank torrc as per...
  # https://trac.torproject.org/projects/tor/ticket/3614
  
  if contents == ['']:
    # double check that "GETINFO config-text" is unavailable rather than just
    # giving an empty result
    
    if torTools.getConn().getInfo("config-text", None) is None:
      raise IOError("determining the torrc requires Tor version 0.2.2.7")
  
  currentLocation = None
  try:
    currentLocation = getConfigLocation()
    if not destination: destination = currentLocation
    else: isSaveconf &= destination == currentLocation
  except IOError: pass
  
  if not destination: raise IOError("unable to determine the torrc's path")
  logMsg = "Saved config by %%s to %s (runtime: %%0.4f)" % destination
  
  # attempts SAVECONF if we're updating our torrc with the current state
  if isSaveconf:
    try:
      torTools.getConn().saveConf()
      
      try: getTorrc().load()
      except IOError: pass
      
      log.debug(logMsg % ("SAVECONF", time.time() - startTime))
      return # if successful then we're done
    except:
      pass
  
  # if the SAVECONF fails or this is a custom save then write contents directly
  try:
    # make dir if the path doesn't already exist
    baseDir = os.path.dirname(destination)
    if not os.path.exists(baseDir): os.makedirs(baseDir)
    
    # saves the configuration to the file
    configFile = open(destination, "w")
    configFile.write("\n".join(contents))
    configFile.close()
  except (IOError, OSError) as exc:
    raise IOError(exc)
Example #42
0
  def get(self, key, default = None):
    """
    Fetches the given configuration, using the key and default value to
    determine the type it should be. Recognized inferences are:

    * **default is a boolean => boolean**

      * values are case insensitive
      * provides the default if the value isn't "true" or "false"

    * **default is an integer => int**

      * provides the default if the value can't be converted to an int

    * **default is a float => float**

      * provides the default if the value can't be converted to a float

    * **default is a list => list**

      * string contents for all configuration values with this key

    * **default is a tuple => tuple**

      * string contents for all configuration values with this key

    * **default is a dictionary => dict**

      * values without "=>" in them are ignored
      * values are split into key/value pairs on "=>" with extra whitespace
        stripped

    :param str key: config setting to be fetched
    :param object default: value provided if no such key exists or fails to be converted

    :returns: given configuration value with its type inferred with the above rules
    """

    is_multivalue = isinstance(default, (list, tuple, dict))
    val = self.get_value(key, default, is_multivalue)

    if val == default:
      return val  # don't try to infer undefined values

    if isinstance(default, bool):
      if val.lower() == "true":
        val = True
      elif val.lower() == "false":
        val = False
      else:
        log.debug("Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default)))
        val = default
    elif isinstance(default, int):
      try:
        val = int(val)
      except ValueError:
        log.debug("Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default))
        val = default
    elif isinstance(default, float):
      try:
        val = float(val)
      except ValueError:
        log.debug("Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default))
        val = default
    elif isinstance(default, list):
      pass  # nothing special to do (already a list)
    elif isinstance(default, tuple):
      val = tuple(val)
    elif isinstance(default, dict):
      valMap = {}
      for entry in val:
        if "=>" in entry:
          entryKey, entryVal = entry.split("=>", 1)
          valMap[entryKey.strip()] = entryVal.strip()
        else:
          log.debug("Ignoring invalid %s config entry (expected a mapping, but \"%s\" was missing \"=>\")" % (key, entry))
      val = valMap

    return val
Example #43
0
    baseDir = os.path.dirname(destination)
    if not os.path.exists(baseDir): os.makedirs(baseDir)
    
    # saves the configuration to the file
    configFile = open(destination, "w")
    configFile.write("\n".join(contents))
    configFile.close()
  except (IOError, OSError) as exc:
    raise IOError(exc)
  
  # reloads the cached torrc if overwriting it
  if destination == currentLocation:
    try: getTorrc().load()
    except IOError: pass
  
  log.debug(logMsg % ("directly writing", time.time() - startTime))

def validate(contents = None):
  """
  Performs validation on the given torrc contents, providing back a listing of
  (line number, issue, msg) tuples for issues found. If the issue occurs on a
  multiline torrc entry then the line number is for the last line of the entry.
  
  Arguments:
    contents - torrc contents
  """
  
  conn = torTools.getConn()
  customOptions = getCustomOptions()
  issuesFound, seenOptions = [], []
  
Example #44
0
def call(command, default = UNDEFINED, ignore_exit_status = False, timeout = None, cwd = None, env = None):
  """
  call(command, default = UNDEFINED, ignore_exit_status = False)

  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.

  .. versionchanged:: 1.5.0
     Providing additional information upon failure by raising a CallError. This
     is a subclass of OSError, providing backward compatibility.

  .. versionchanged:: 1.5.0
     Added env argument.

  .. versionchanged:: 1.6.0
     Added timeout and cwd arguments.

  :param str,list command: command to be issued
  :param object default: response if the query fails
  :param bool ignore_exit_status: reports failure if our command's exit status
    was non-zero
  :param float timeout: maximum seconds to wait, blocks indefinitely if
    **None**
  :param dict env: environment variables

  :returns: **list** with the lines of output from the command

  :raises:
    * **CallError** if this fails and no default was provided
    * **CallTimeoutError** if the timeout is reached without a default
  """

  global SYSTEM_CALL_TIME

  if isinstance(command, str):
    command_list = command.split(' ')
  else:
    command_list = list(map(str, command))

  exit_status, runtime, stdout, stderr = None, None, None, None
  start_time = time.time()

  try:
    is_shell_command = command_list[0] in SHELL_COMMANDS

    process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command, cwd = cwd, env = env)

    if timeout:
      while process.poll() is None:
        if time.time() - start_time > timeout:
          raise CallTimeoutError("Process didn't finish after %0.1f seconds" % timeout, ' '.join(command_list), None, timeout, '', '', timeout)

        time.sleep(0.001)

    stdout, stderr = process.communicate()
    stdout, stderr = stdout.strip(), stderr.strip()
    runtime = time.time() - start_time

    log.debug('System call: %s (runtime: %0.2f)' % (command, runtime))
    trace_prefix = 'Received from system (%s)' % command

    if stdout and stderr:
      log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
    elif stdout:
      log.trace(trace_prefix + ', stdout:\n%s' % stdout)
    elif stderr:
      log.trace(trace_prefix + ', stderr:\n%s' % stderr)

    exit_status = process.poll()

    if not ignore_exit_status and exit_status != 0:
      raise OSError('%s returned exit status %i' % (command, exit_status))

    if stdout:
      return stdout.decode('utf-8', 'replace').splitlines()
    else:
      return []
  except CallTimeoutError:
    log.debug('System call (timeout): %s (after %0.4fs)' % (command, timeout))

    if default != UNDEFINED:
      return default
    else:
      raise
  except OSError as exc:
    log.debug('System call (failed): %s (error: %s)' % (command, exc))

    if default != UNDEFINED:
      return default
    else:
      raise CallError(str(exc), ' '.join(command_list), exit_status, runtime, stdout, stderr)
  finally:
    with SYSTEM_CALL_TIME_LOCK:
      SYSTEM_CALL_TIME += time.time() - start_time
Example #45
0
 def _log(msg):
     if LOG_CONNECTION_RESOLUTION:
         log.debug(msg)
Example #46
0
    if AuthMethod.UNKNOWN in auth_methods:
        auth_methods.remove(AuthMethod.UNKNOWN)

        unknown_methods = protocolinfo_response.unknown_auth_methods
        plural_label = "s" if len(unknown_methods) > 1 else ""
        methods_label = ", ".join(unknown_methods)

        # we... er, can't do anything with only unrecognized auth types
        if not auth_methods:
            exc_msg = "unrecognized authentication method%s (%s)" % (
                plural_label, methods_label)
            auth_exceptions.append(
                UnrecognizedAuthMethods(exc_msg, unknown_methods))
        else:
            log.debug(
                "Authenticating to a socket with unrecognized auth method%s, ignoring them: %s"
                % (plural_label, methods_label))

    if protocolinfo_response.cookie_path is None:
        for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
            if cookie_auth_method in auth_methods:
                auth_methods.remove(cookie_auth_method)

                exc_msg = "our PROTOCOLINFO response did not have the location of our authentication cookie"
                auth_exceptions.append(
                    NoAuthCookie(exc_msg,
                                 cookie_auth_method == AuthMethod.SAFECOOKIE))

    if AuthMethod.PASSWORD in auth_methods and password is None:
        auth_methods.remove(AuthMethod.PASSWORD)
        auth_exceptions.append(MissingPassword("no passphrase provided"))
Example #47
0
 def run(self):
   while not self._halt:
     timeSinceReset = time.time() - self.lastLookup
     
     if self.resolveRate == 0:
       self._cond.acquire()
       if not self._halt: self._cond.wait(0.2)
       self._cond.release()
       
       continue
     elif timeSinceReset < self.resolveRate:
       sleepTime = max(0.2, self.resolveRate - timeSinceReset)
       
       self._cond.acquire()
       if not self._halt: self._cond.wait(sleepTime)
       self._cond.release()
       
       continue # done waiting, try again
     
     newValues = {}
     try:
       if self._useProc:
         utime, stime, startTime = proc.get_stats(self.processPid, proc.Stat.CPU_UTIME, proc.Stat.CPU_STIME, proc.Stat.START_TIME)
         totalCpuTime = float(utime) + float(stime)
         cpuDelta = totalCpuTime - self._lastCpuTotal
         newValues["cpuSampling"] = cpuDelta / timeSinceReset
         newValues["cpuAvg"] = totalCpuTime / (time.time() - float(startTime))
         newValues["_lastCpuTotal"] = totalCpuTime
         
         memUsage = int(proc.get_memory_usage(self.processPid)[0])
         totalMemory = proc.get_physical_memory()
         newValues["memUsage"] = memUsage
         newValues["memUsagePercentage"] = float(memUsage) / totalMemory
       else:
         # the ps call formats results as:
         # 
         #     TIME     ELAPSED   RSS %MEM
         # 3-08:06:32 21-00:00:12 121844 23.5
         # 
         # or if Tor has only recently been started:
         # 
         #     TIME      ELAPSED    RSS %MEM
         #  0:04.40        37:57  18772  0.9
         
         psCall = system.call("ps -p %s -o cputime,etime,rss,%%mem" % self.processPid)
         
         isSuccessful = False
         if psCall and len(psCall) >= 2:
           stats = psCall[1].strip().split()
           
           if len(stats) == 4:
             try:
               totalCpuTime = str_tools.parse_short_time_label(stats[0])
               uptime = str_tools.parse_short_time_label(stats[1])
               cpuDelta = totalCpuTime - self._lastCpuTotal
               newValues["cpuSampling"] = cpuDelta / timeSinceReset
               newValues["cpuAvg"] = totalCpuTime / uptime
               newValues["_lastCpuTotal"] = totalCpuTime
               
               newValues["memUsage"] = int(stats[2]) * 1024 # ps size is in kb
               newValues["memUsagePercentage"] = float(stats[3]) / 100.0
               isSuccessful = True
             except ValueError: pass
         
         if not isSuccessful:
           raise IOError("unrecognized output from ps: %s" % psCall)
     except IOError as exc:
       newValues = {}
       self._failureCount += 1
       
       if self._useProc:
         if self._failureCount >= 3:
           # We've failed three times resolving via proc. Warn, and fall back
           # to ps resolutions.
           log.info("Failed three attempts to get process resource usage from proc, falling back to ps (%s)" % exc)
           
           self._useProc = False
           self._failureCount = 1 # prevents lastQueryFailed() from thinking that we succeeded
         else:
           # wait a bit and try again
           log.debug("Unable to query process resource usage from proc (%s)" % exc)
           self._cond.acquire()
           if not self._halt: self._cond.wait(0.5)
           self._cond.release()
       else:
         # exponential backoff on making failed ps calls
         sleepTime = 0.01 * (2 ** self._failureCount) + self._failureCount
         log.debug("Unable to query process resource usage from ps, waiting %0.2f seconds (%s)" % (sleepTime, exc))
         self._cond.acquire()
         if not self._halt: self._cond.wait(sleepTime)
         self._cond.release()
     
     # sets the new values
     if newValues:
       # If this is the first run then the cpuSampling stat is meaningless
       # (there isn't a previous tick to sample from so it's zero at this
       # point). We set it to the average instead, which is a fairer estimate.
       if self.lastLookup == -1:
         newValues["cpuSampling"] = newValues["cpuAvg"]
       
       self._valLock.acquire()
       self.cpuSampling = newValues["cpuSampling"]
       self.cpuAvg = newValues["cpuAvg"]
       self.memUsage = newValues["memUsage"]
       self.memUsagePercentage = newValues["memUsagePercentage"]
       self._lastCpuTotal = newValues["_lastCpuTotal"]
       self.lastLookup = time.time()
       self._runCount += 1
       self._failureCount = 0
       self._valLock.release()
Example #48
0
  def _parse_message(self):
    # Example:
    #   250-PROTOCOLINFO 1
    #   250-AUTH METHODS=COOKIE COOKIEFILE="/home/atagar/.tor/control_auth_cookie"
    #   250-VERSION Tor="0.2.1.30"
    #   250 OK

    self.protocol_version = None
    self.tor_version = None
    self.auth_methods = ()
    self.unknown_auth_methods = ()
    self.cookie_path = None

    auth_methods, unknown_auth_methods = [], []
    remaining_lines = list(self)

    if not self.is_ok() or not remaining_lines.pop() == "OK":
      raise stem.ProtocolError("PROTOCOLINFO response didn't have an OK status:\n%s" % self)

    # sanity check that we're a PROTOCOLINFO response
    if not remaining_lines[0].startswith("PROTOCOLINFO"):
      raise stem.ProtocolError("Message is not a PROTOCOLINFO response:\n%s" % self)

    while remaining_lines:
      line = remaining_lines.pop(0)
      line_type = line.pop()

      if line_type == "PROTOCOLINFO":
        # Line format:
        #   FirstLine = "PROTOCOLINFO" SP PIVERSION CRLF
        #   PIVERSION = 1*DIGIT

        if line.is_empty():
          raise stem.ProtocolError("PROTOCOLINFO response's initial line is missing the protocol version: %s" % line)

        try:
          self.protocol_version = int(line.pop())
        except ValueError:
          raise stem.ProtocolError("PROTOCOLINFO response version is non-numeric: %s" % line)

        # The piversion really should be "1" but, according to the spec, tor
        # does not necessarily need to provide the PROTOCOLINFO version that we
        # requested. Log if it's something we aren't expecting but still make
        # an effort to parse like a v1 response.

        if self.protocol_version != 1:
          log.info("We made a PROTOCOLINFO version 1 query but got a version %i response instead. We'll still try to use it, but this may cause problems." % self.protocol_version)
      elif line_type == "AUTH":
        # Line format:
        #   AuthLine = "250-AUTH" SP "METHODS=" AuthMethod *("," AuthMethod)
        #              *(SP "COOKIEFILE=" AuthCookieFile) CRLF
        #   AuthMethod = "NULL" / "HASHEDPASSWORD" / "COOKIE"
        #   AuthCookieFile = QuotedString

        # parse AuthMethod mapping
        if not line.is_next_mapping("METHODS"):
          raise stem.ProtocolError("PROTOCOLINFO response's AUTH line is missing its mandatory 'METHODS' mapping: %s" % line)

        for method in line.pop_mapping()[1].split(","):
          if method == "NULL":
            auth_methods.append(AuthMethod.NONE)
          elif method == "HASHEDPASSWORD":
            auth_methods.append(AuthMethod.PASSWORD)
          elif method == "COOKIE":
            auth_methods.append(AuthMethod.COOKIE)
          elif method == "SAFECOOKIE":
            auth_methods.append(AuthMethod.SAFECOOKIE)
          else:
            unknown_auth_methods.append(method)
            message_id = "stem.response.protocolinfo.unknown_auth_%s" % method
            log.log_once(message_id, log.INFO, "PROTOCOLINFO response included a type of authentication that we don't recognize: %s" % method)

            # our auth_methods should have a single AuthMethod.UNKNOWN entry if
            # any unknown authentication methods exist
            if AuthMethod.UNKNOWN not in auth_methods:
              auth_methods.append(AuthMethod.UNKNOWN)

        # parse optional COOKIEFILE mapping (quoted and can have escapes)
        if line.is_next_mapping("COOKIEFILE", True, True):
          self.cookie_path = line.pop_mapping(True, True)[1]
      elif line_type == "VERSION":
        # Line format:
        #   VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF
        #   TorVersion = QuotedString

        if not line.is_next_mapping("Tor", True):
          raise stem.ProtocolError("PROTOCOLINFO response's VERSION line is missing its mandatory tor version mapping: %s" % line)

        try:
          self.tor_version = stem.version.Version(line.pop_mapping(True)[1])
        except ValueError as exc:
          raise stem.ProtocolError(exc)
      else:
        log.debug("Unrecognized PROTOCOLINFO line type '%s', ignoring it: %s" % (line_type, line))

    self.auth_methods = tuple(auth_methods)
    self.unknown_auth_methods = tuple(unknown_auth_methods)
Example #49
0
def call(command, default = UNDEFINED, ignore_exit_status = False, timeout = None, cwd = None, env = None):
  """
  call(command, default = UNDEFINED, ignore_exit_status = False)

  Issues a command in a subprocess, blocking until completion and returning the
  results. This is not actually run in a shell so pipes and other shell syntax
  are not permitted.

  .. versionchanged:: 1.5.0
     Providing additional information upon failure by raising a CallError. This
     is a subclass of OSError, providing backward compatibility.

  .. versionchanged:: 1.5.0
     Added env argument.

  .. versionchanged:: 1.6.0
     Added timeout and cwd arguments.

  :param str,list command: command to be issued
  :param object default: response if the query fails
  :param bool ignore_exit_status: reports failure if our command's exit status
    was non-zero
  :param float timeout: maximum seconds to wait, blocks indefinitely if
    **None**
  :param dict env: environment variables

  :returns: **list** with the lines of output from the command

  :raises:
    * **CallError** if this fails and no default was provided
    * **CallTimeoutError** if the timeout is reached without a default
  """

  # TODO: in stem 2.x return a struct with stdout, stderr, and runtime instead

  global SYSTEM_CALL_TIME

  if isinstance(command, str):
    command_list = command.split(' ')
  else:
    command_list = list(map(str, command))

  exit_status, runtime, stdout, stderr = None, None, None, None
  start_time = time.time()

  try:
    is_shell_command = command_list[0] in SHELL_COMMANDS

    process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command, cwd = cwd, env = env)

    if timeout:
      while process.poll() is None:
        if time.time() - start_time > timeout:
          raise CallTimeoutError("Process didn't finish after %0.1f seconds" % timeout, ' '.join(command_list), None, timeout, '', '', timeout)

        time.sleep(0.001)

    stdout, stderr = process.communicate()
    stdout, stderr = stdout.strip(), stderr.strip()
    runtime = time.time() - start_time

    log.debug('System call: %s (runtime: %0.2f)' % (command, runtime))

    if log.is_tracing():
      trace_prefix = 'Received from system (%s)' % command

      if stdout and stderr:
        log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
      elif stdout:
        log.trace(trace_prefix + ', stdout:\n%s' % stdout)
      elif stderr:
        log.trace(trace_prefix + ', stderr:\n%s' % stderr)

    exit_status = process.poll()

    if not ignore_exit_status and exit_status != 0:
      raise OSError('%s returned exit status %i' % (command, exit_status))

    if stdout:
      return stdout.decode('utf-8', 'replace').splitlines()
    else:
      return []
  except CallTimeoutError:
    log.debug('System call (timeout): %s (after %0.4fs)' % (command, timeout))

    if default != UNDEFINED:
      return default
    else:
      raise
  except OSError as exc:
    log.debug('System call (failed): %s (error: %s)' % (command, exc))

    if default != UNDEFINED:
      return default
    else:
      raise CallError(str(exc), ' '.join(command_list), exit_status, runtime, stdout, stderr)
  finally:
    with SYSTEM_CALL_TIME_LOCK:
      SYSTEM_CALL_TIME += time.time() - start_time
Example #50
0
def authenticate(controller, password = None, chroot_path = None, protocolinfo_response = None):
  """
  Authenticates to a control socket using the information provided by a
  PROTOCOLINFO response. In practice this will often be all we need to
  authenticate, raising an exception if all attempts to authenticate fail.

  All exceptions are subclasses of AuthenticationFailure so, in practice,
  callers should catch the types of authentication failure that they care
  about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all
  at the end.

  This can authenticate to either a :class:`~stem.control.BaseController` or
  :class:`~stem.socket.ControlSocket`.

  :param controller: tor controller or socket to be authenticated
  :param str password: passphrase to present to the socket if it uses password
    authentication (skips password auth if **None**)
  :param str chroot_path: path prefix if in a chroot environment
  :param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response:
    tor protocolinfo response, this is retrieved on our own if **None**

  :raises: If all attempts to authenticate fails then this will raise a
    :class:`~stem.connection.AuthenticationFailure` subclass. Since this may
    try multiple authentication methods it may encounter multiple exceptions.
    If so then the exception this raises is prioritized as follows...

    * :class:`stem.connection.IncorrectSocketType`

      The controller does not speak the tor control protocol. Most often this
      happens because the user confused the SocksPort or ORPort with the
      ControlPort.

    * :class:`stem.connection.UnrecognizedAuthMethods`

      All of the authentication methods tor will accept are new and
      unrecognized. Please upgrade stem and, if that doesn't work, file a
      ticket on 'trac.torproject.org' and I'd be happy to add support.

    * :class:`stem.connection.MissingPassword`

      We were unable to authenticate but didn't attempt password authentication
      because none was provided. You should prompt the user for a password and
      try again via 'authenticate_password'.

    * :class:`stem.connection.IncorrectPassword`

      We were provided with a password but it was incorrect.

    * :class:`stem.connection.IncorrectCookieSize`

      Tor allows for authentication by reading a cookie file, but that file
      is the wrong size to be an authentication cookie.

    * :class:`stem.connection.UnreadableCookieFile`

      Tor allows for authentication by reading a cookie file, but we can't
      read that file (probably due to permissions).

    * **\***:class:`stem.connection.IncorrectCookieValue`

      Tor allows for authentication by reading a cookie file, but rejected
      the contents of that file.

    * **\***:class:`stem.connection.AuthChallengeUnsupported`

      Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
      version prior to SAFECOOKIE being implemented, but this exception shouldn't
      arise because we won't attempt SAFECOOKIE auth unless Tor claims to
      support it.

    * **\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`

      Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
      shouldn't happen at all.

    * **\***:class:`stem.connection.InvalidClientNonce`

      Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
      process is invalid.

    * **\***:class:`stem.connection.AuthSecurityFailure`

      Nonce value provided by the server was invalid.

    * **\***:class:`stem.connection.OpenAuthRejected`

      Tor says that it allows for authentication without any credentials, but
      then rejected our authentication attempt.

    * **\***:class:`stem.connection.MissingAuthInfo`

      Tor provided us with a PROTOCOLINFO reply that is technically valid, but
      missing the information we need to authenticate.

    * **\***:class:`stem.connection.AuthenticationFailure`

      There are numerous other ways that authentication could have failed
      including socket failures, malformed controller responses, etc. These
      mostly constitute transient failures or bugs.

    **\*** In practice it is highly unusual for this to occur, being more of a
    theoretical possibility rather than something you should expect. It's fine
    to treat these as errors. If you have a use case where this commonly
    happens, please file a ticket on 'trac.torproject.org'.

    In the future new :class:`~stem.connection.AuthenticationFailure`
    subclasses may be added to allow for better error handling.
  """

  if not protocolinfo_response:
    try:
      protocolinfo_response = get_protocolinfo(controller)
    except stem.ProtocolError:
      raise IncorrectSocketType('unable to use the control socket')
    except stem.SocketError as exc:
      raise AuthenticationFailure('socket connection failed (%s)' % exc)

  auth_methods = list(protocolinfo_response.auth_methods)
  auth_exceptions = []

  if len(auth_methods) == 0:
    raise NoAuthMethods('our PROTOCOLINFO response did not have any methods for authenticating')

  # remove authentication methods that are either unknown or for which we don't
  # have an input
  if AuthMethod.UNKNOWN in auth_methods:
    auth_methods.remove(AuthMethod.UNKNOWN)

    unknown_methods = protocolinfo_response.unknown_auth_methods
    plural_label = 's' if len(unknown_methods) > 1 else ''
    methods_label = ', '.join(unknown_methods)

    # we... er, can't do anything with only unrecognized auth types
    if not auth_methods:
      exc_msg = 'unrecognized authentication method%s (%s)' % (plural_label, methods_label)
      auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods))
    else:
      log.debug('Authenticating to a socket with unrecognized auth method%s, ignoring them: %s' % (plural_label, methods_label))

  if protocolinfo_response.cookie_path is None:
    for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
      if cookie_auth_method in auth_methods:
        auth_methods.remove(cookie_auth_method)

        exc_msg = 'our PROTOCOLINFO response did not have the location of our authentication cookie'
        auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE))

  if AuthMethod.PASSWORD in auth_methods and password is None:
    auth_methods.remove(AuthMethod.PASSWORD)
    auth_exceptions.append(MissingPassword('no passphrase provided'))

  # iterating over AuthMethods so we can try them in this order
  for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE):
    if auth_type not in auth_methods:
      continue

    try:
      if auth_type == AuthMethod.NONE:
        authenticate_none(controller, False)
      elif auth_type == AuthMethod.PASSWORD:
        authenticate_password(controller, password, False)
      elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE):
        cookie_path = protocolinfo_response.cookie_path

        if chroot_path:
          cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep))

        if auth_type == AuthMethod.SAFECOOKIE:
          authenticate_safecookie(controller, cookie_path, False)
        else:
          authenticate_cookie(controller, cookie_path, False)

      return  # success!
    except OpenAuthRejected as exc:
      auth_exceptions.append(exc)
    except IncorrectPassword as exc:
      auth_exceptions.append(exc)
    except PasswordAuthRejected as exc:
      # Since the PROTOCOLINFO says password auth is available we can assume
      # that if PasswordAuthRejected is raised it's being raised in error.
      log.debug('The authenticate_password method raised a PasswordAuthRejected when password auth should be available. Stem may need to be corrected to recognize this response: %s' % exc)
      auth_exceptions.append(IncorrectPassword(str(exc)))
    except AuthSecurityFailure as exc:
      log.info('Tor failed to provide the nonce expected for safecookie authentication. (%s)' % exc)
      auth_exceptions.append(exc)
    except (InvalidClientNonce, UnrecognizedAuthChallengeMethod, AuthChallengeFailed) as exc:
      auth_exceptions.append(exc)
    except (IncorrectCookieSize, UnreadableCookieFile, IncorrectCookieValue) as exc:
      auth_exceptions.append(exc)
    except CookieAuthRejected as exc:
      auth_func = 'authenticate_safecookie' if exc.is_safecookie else 'authenticate_cookie'

      log.debug('The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s' % (auth_func, exc))
      auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie))
    except stem.ControllerError as exc:
      auth_exceptions.append(AuthenticationFailure(str(exc)))

  # All authentication attempts failed. Raise the exception that takes priority
  # according to our pydocs.

  for exc_type in AUTHENTICATE_EXCEPTIONS:
    for auth_exc in auth_exceptions:
      if isinstance(auth_exc, exc_type):
        raise auth_exc

  # We really, really shouldn't get here. It means that auth_exceptions is
  # either empty or contains something that isn't an AuthenticationFailure.

  raise AssertionError('BUG: Authentication failed without providing a recognized exception: %s' % str(auth_exceptions))
Example #51
0
def pid_by_name(process_name, multiple = False):
  """
  Attempts to determine the process id for a running process, using...

  ::

    1. pgrep -x <name>
    2. pidof <name>
    3. ps -o pid -C <name> (linux)
       ps axc | egrep " <name>$" (bsd)
    4. lsof -tc <name>
    5. tasklist | str <name>.exe

  :param str process_name: process name for which to fetch the pid
  :param bool multiple: provides a list of all pids if **True**, otherwise
    results with multiple processes are discarded

  :returns:
    Response depends upon the 'multiple' argument as follows...

    * if **False** then this provides an **int** with the process id or **None** if it can't be determined
    * if **True** then this provides a **list** of all **int** process ids, and an empty list if it can't be determined
  """

  # attempts to resolve using pgrep, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pgrep -x vim
  #   3283
  #   3392

  if is_available('pgrep'):
    results = call(GET_PID_BY_NAME_PGREP % process_name, None)

    if results:
      try:
        pids = list(map(int, results))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  # attempts to resolve using pidof, failing if:
  # - we're running on bsd (command unavailable)
  #
  # example output:
  #   atagar@morrigan:~$ pidof vim
  #   3392 3283

  if is_available('pidof'):
    results = call(GET_PID_BY_NAME_PIDOF % process_name, None)

    if results and len(results) == 1:
      try:
        pids = list(map(int, results[0].split()))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  # attempts to resolve using ps, failing if:
  # - system's ps variant doesn't handle these flags (none known at the moment)
  #
  # example output:
  #   atagar@morrigan:~/Desktop/stem$ ps -o pid -C vim
  #     PID
  #    3283
  #    3392
  #
  #   atagar$ ps axc
  #     PID   TT  STAT      TIME COMMAND
  #       1   ??  Ss     9:00.22 launchd
  #      10   ??  Ss     0:09.97 kextd
  #      11   ??  Ss     5:47.36 DirectoryService
  #      12   ??  Ss     3:01.44 notifyd

  if is_available('ps'):
    if not is_bsd():
      # linux variant of ps
      results = call(GET_PID_BY_NAME_PS_LINUX % process_name, None)

      if results:
        try:
          pids = list(map(int, results[1:]))

          if multiple:
            return pids
          elif len(pids) == 1:
            return pids[0]
        except ValueError:
          pass

    if is_bsd():
      # bsd variant of ps
      results = call(GET_PID_BY_NAME_PS_BSD, None)

      if results:
        # filters results to those with our process name
        results = [r.split()[0] for r in results if r.endswith(' %s' % process_name)]

        try:
          pids = list(map(int, results))

          if multiple:
            return pids
          elif len(pids) == 1:
            return pids[0]
        except ValueError:
          pass

  # resolves using lsof which works on both Linux and BSD, only failing if:
  # - lsof is unavailable (not included by default on OpenBSD)
  # - the process being run as a different user due to permissions
  # - the process doesn't have any open files to be reported by lsof?
  #
  # flags:
  #   t - only show pids
  #   c - restrict results to that command
  #
  # example output:
  #   atagar@morrigan:~$ lsof -t -c vim
  #   2470
  #   2561

  if is_available('lsof'):
    results = call(GET_PID_BY_NAME_LSOF % process_name, None)

    if results:
      try:
        pids = list(map(int, results))

        if multiple:
          return pids
        elif len(pids) == 1:
          return pids[0]
      except ValueError:
        pass

  if is_available('tasklist') and is_windows():
    if not process_name.endswith('.exe'):
      process_name = process_name + '.exe'

    process_ids = []

    results = stem.util.system.call('tasklist', None)

    if results:
      tasklist_regex = re.compile(r'^\s*%s\s+(?P<pid>[0-9]*)' % process_name)

      for line in results:
        match = tasklist_regex.search(line)

        if match:
          process_ids.append(int(match.group('pid')))

      if multiple:
        return process_ids
      elif len(process_ids) > 0:
        return process_ids[0]

  log.debug("failed to resolve a pid for '%s'" % process_name)
  return [] if multiple else None