Example #1
def pep8_issues(base_path = DEFAULT_TARGET):
  """
  Checks for stylistic issues according to the parts of PEP8 we conform to.

  :param str base_path: directory to be iterated over

  :returns: dict of the form ``path => [(line_number, message)...]``
  """

  # The pep8 command gives output of the form...
  #
  #   FILE:LINE:CHARACTER ISSUE
  #
  # ... for instance...
  #
  #   ./test/mocking.py:868:31: E225 missing whitespace around operator
  #
  # Ignoring the following compliance issues.
  #
  # * E251 no spaces around keyword / parameter equals
  #
  #   This one I dislike a great deal. It makes keyword arguments different
  #   from assignments which looks... awful. I'm not sure what PEP8's author
  #   was on when he wrote this one but it's stupid.
  #
  #   Someone else can change this if they really care.
  #
  # * E501 line is over 79 characters
  #
  #   We're no longer on TTY terminals. Overly constraining line length makes
  #   things far less readable, encouraging bad practices like abbreviated
  #   variable names.
  #
  #   If the code fits on my tiny netbook screen then it's narrow enough.
  #
  # * E111 and E121 four space indentations
  #
  #   Ahhh, indentation. The holy war that'll never die. Sticking with two
  #   space indentations since it leads to shorter lines.
  #
  # * E127 continuation line over-indented for visual indent
  #
  #   Pep8 only works with this one if we have four space indents (its
  #   detection is based on multiples of four).

  ignored_issues = "E111,E121,E501,E251,E127"

  issues = {}
  pep8_output = system.call("pep8 --ignore %s %s" % (ignored_issues, base_path))

  for line in pep8_output:
    line_match = re.match(r"^(.*):(\d+):(\d+): (.*)$", line)

    if line_match:
      path, line, _, issue = line_match.groups()
      issues.setdefault(path, []).append((int(line), issue))

  return issues
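
For illustration (not part of the example above), here is a minimal self-contained sketch of the same parse using the standard library's subprocess module instead of stem's system.call; the ignore list and regex are taken from the example, while the pycodestyle binary name and default target path are assumptions:

import re
import subprocess

def pep8_issues_standalone(base_path='.', ignore='E111,E121,E501,E251,E127'):
  # pycodestyle is the current name of the pep8 tool; adjust if your install differs
  output = subprocess.run(
    ['pycodestyle', '--ignore', ignore, base_path],
    capture_output=True, text=True,
  ).stdout

  issues = {}

  for line in output.splitlines():
    match = re.match(r'^(.*):(\d+):(\d+): (.*)$', line)

    if match:
      path, line_number, _, issue = match.groups()
      issues.setdefault(path, []).append((int(line_number), issue))

  return issues
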
Example #2
def is_wide_characters_supported():
  """
  Checks if our version of curses has wide character support. This is required
  to print unicode.

  :returns: **bool** that's **True** if curses supports wide characters, and
    **False** if it either can't or this can't be determined
  """

  try:
    # Gets the dynamic library used by the interpreter for curses. This uses
    # 'ldd' on Linux or 'otool -L' on OSX.
    #
    # atagar@fenrir:~/Desktop$ ldd /usr/lib/python2.6/lib-dynload/_curses.so
    #   linux-gate.so.1 =>  (0x00a51000)
    #   libncursesw.so.5 => /lib/libncursesw.so.5 (0x00faa000)
    #   libpthread.so.0 => /lib/tls/i686/cmov/libpthread.so.0 (0x002f1000)
    #   libc.so.6 => /lib/tls/i686/cmov/libc.so.6 (0x00158000)
    #   libdl.so.2 => /lib/tls/i686/cmov/libdl.so.2 (0x00398000)
    #   /lib/ld-linux.so.2 (0x00ca8000)
    #
    # atagar$ otool -L /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so
    # /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so:
    #   /usr/lib/libncurses.5.4.dylib (compatibility version 5.4.0, current version 5.4.0)
    #   /usr/lib/libgcc_s.1.dylib (compatibility version 1.0.0, current version 1.0.0)
    #   /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 111.1.6)

    import _curses

    lib_dependency_lines = None

    if system.is_available('ldd'):
      lib_dependency_lines = system.call('ldd %s' % _curses.__file__)
    elif system.is_available('otool'):
      lib_dependency_lines = system.call('otool -L %s' % _curses.__file__)

    if lib_dependency_lines:
      for line in lib_dependency_lines:
        if 'libncursesw' in line:
          return True
  except:
    pass

  return False
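
A hedged, standard-library-only sketch of the same ldd check (Linux only; it assumes _curses is a compiled extension with a __file__ path, and it omits the otool branch and the broad exception guard used above):

import shutil
import subprocess

import _curses  # assumed to be a shared object, as on typical Linux CPython builds

def has_wide_curses():
  # without ldd we can't inspect the linking, so treat wide support as undetermined
  if shutil.which('ldd') is None:
    return False

  output = subprocess.run(['ldd', _curses.__file__],
                          capture_output=True, text=True).stdout

  # linking against libncursesw (the 'w' variant) indicates wide character support
  return 'libncursesw' in output
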
Example #3
def _isWideCharactersAvailable():
    """
  True if curses has wide character support (which is required to print
  unicode). False otherwise.
  """

    try:
        # gets the dynamic library used by the interpreter for curses

        import _curses
        cursesLib = _curses.__file__

        # Uses 'ldd' (Linux) or 'otool -L' (Mac) to determine the curses
        # library dependencies.
        #
        # atagar@fenrir:~/Desktop$ ldd /usr/lib/python2.6/lib-dynload/_curses.so
        #   linux-gate.so.1 =>  (0x00a51000)
        #   libncursesw.so.5 => /lib/libncursesw.so.5 (0x00faa000)
        #   libpthread.so.0 => /lib/tls/i686/cmov/libpthread.so.0 (0x002f1000)
        #   libc.so.6 => /lib/tls/i686/cmov/libc.so.6 (0x00158000)
        #   libdl.so.2 => /lib/tls/i686/cmov/libdl.so.2 (0x00398000)
        #   /lib/ld-linux.so.2 (0x00ca8000)
        #
        # atagar$ otool -L /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so
        # /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so:
        #   /usr/lib/libncurses.5.4.dylib (compatibility version 5.4.0, current version 5.4.0)
        #   /usr/lib/libgcc_s.1.dylib (compatibility version 1.0.0, current version 1.0.0)
        #   /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 111.1.6)

        libDependencyLines = None
        if system.is_available("ldd"):
            libDependencyLines = system.call("ldd %s" % cursesLib)
        elif system.is_available("otool"):
            libDependencyLines = system.call("otool -L %s" % cursesLib)

        if libDependencyLines:
            for line in libDependencyLines:
                if "libncursesw" in line: return True
    except:
        pass

    return False
Example #4
def _isWideCharactersAvailable():
  """
  True if curses has wide character support (which is required to print
  unicode). False otherwise.
  """
  
  try:
    # gets the dynamic library used by the interpreter for curses
    
    import _curses
    cursesLib = _curses.__file__
    
    # Uses 'ldd' (Linux) or 'otool -L' (Mac) to determine the curses
    # library dependencies.
    # 
    # atagar@fenrir:~/Desktop$ ldd /usr/lib/python2.6/lib-dynload/_curses.so
    #   linux-gate.so.1 =>  (0x00a51000)
    #   libncursesw.so.5 => /lib/libncursesw.so.5 (0x00faa000)
    #   libpthread.so.0 => /lib/tls/i686/cmov/libpthread.so.0 (0x002f1000)
    #   libc.so.6 => /lib/tls/i686/cmov/libc.so.6 (0x00158000)
    #   libdl.so.2 => /lib/tls/i686/cmov/libdl.so.2 (0x00398000)
    #   /lib/ld-linux.so.2 (0x00ca8000)
    # 
    # atagar$ otool -L /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so
    # /System/Library/Frameworks/Python.framework/Versions/2.5/lib/python2.5/lib-dynload/_curses.so:
    #   /usr/lib/libncurses.5.4.dylib (compatibility version 5.4.0, current version 5.4.0)
    #   /usr/lib/libgcc_s.1.dylib (compatibility version 1.0.0, current version 1.0.0)
    #   /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 111.1.6)
    
    libDependencyLines = None
    if system.is_available("ldd"):
      libDependencyLines = system.call("ldd %s" % cursesLib)
    elif system.is_available("otool"):
      libDependencyLines = system.call("otool -L %s" % cursesLib)
    
    if libDependencyLines:
      for line in libDependencyLines:
        if "libncursesw" in line: return True
  except: pass
  
  return False
Example #5
def _resources_via_ps(pid):
    """
  Fetches resource usage information about a given process via ps. This returns
  a tuple of the form...

    (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)

  :param int pid: process to be queried

  :returns: **tuple** with the resource usage information

  :raises: **IOError** if unsuccessful
  """

    # ps results are of the form...
    #
    #     TIME     ELAPSED   RSS %MEM
    # 3-08:06:32 21-00:00:12 121844 23.5
    #
    # ... or if Tor has only recently been started...
    #
    #     TIME      ELAPSED    RSS %MEM
    #  0:04.40        37:57  18772  0.9

    try:
        ps_call = system.call(
            'ps -p {pid} -o cputime,etime,rss,%mem'.format(pid=pid))
    except OSError as exc:
        raise IOError(exc)

    if ps_call and len(ps_call) >= 2:
        stats = ps_call[1].strip().split()

        if len(stats) == 4:
            try:
                total_cpu_time = str_tools.parse_short_time_label(stats[0])
                uptime = str_tools.parse_short_time_label(stats[1])
                memory_bytes = int(stats[2]) * 1024  # ps size is in kb
                memory_percent = float(stats[3]) / 100.0

                return (total_cpu_time, uptime, memory_bytes, memory_percent)
            except ValueError:
                pass

    raise IOError('unrecognized output from ps: %s' % ps_call)
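
As a rough standalone counterpart (an approximation: stem's str_tools.parse_short_time_label is replaced here by a small hand-rolled parser, and subprocess stands in for system.call):

import os
import subprocess

def ps_seconds(label):
  # converts ps time labels like '3-08:06:32', '37:57', or '0:04.40' into seconds
  days, _, rest = label.partition('-') if '-' in label else ('0', '', label)
  parts = [float(field) for field in rest.split(':')]

  while len(parts) < 3:
    parts.insert(0, 0.0)  # pad out missing hour/minute fields

  hours, minutes, seconds = parts
  return int(days) * 86400 + hours * 3600 + minutes * 60 + seconds

def resources_via_ps(pid):
  output = subprocess.run(
    ['ps', '-p', str(pid), '-o', 'cputime,etime,rss,%mem'],
    capture_output=True, text=True, check=True,
  ).stdout.splitlines()

  cputime, etime, rss, mem = output[1].split()
  return ps_seconds(cputime), ps_seconds(etime), int(rss) * 1024, float(mem) / 100.0

print(resources_via_ps(os.getpid()))
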
Example #6
def _resources_via_ps(pid):
  """
  Fetches resource usage information about a given process via ps. This returns
  a tuple of the form...

    (total_cpu_time, uptime, memory_in_bytes, memory_in_percent)

  :param int pid: process to be queried

  :returns: **tuple** with the resource usage information

  :raises: **IOError** if unsuccessful
  """

  # ps results are of the form...
  #
  #     TIME     ELAPSED   RSS %MEM
  # 3-08:06:32 21-00:00:12 121844 23.5
  #
  # ... or if Tor has only recently been started...
  #
  #     TIME      ELAPSED    RSS %MEM
  #  0:04.40        37:57  18772  0.9

  try:
    ps_call = system.call('ps -p {pid} -o cputime,etime,rss,%mem'.format(pid = pid))
  except OSError as exc:
    raise IOError(exc)

  if ps_call and len(ps_call) >= 2:
    stats = ps_call[1].strip().split()

    if len(stats) == 4:
      try:
        total_cpu_time = str_tools.parse_short_time_label(stats[0])
        uptime = str_tools.parse_short_time_label(stats[1])
        memory_bytes = int(stats[2]) * 1024  # ps size is in kb
        memory_percent = float(stats[3]) / 100.0

        return (total_cpu_time, uptime, memory_bytes, memory_percent)
      except ValueError:
        pass

  raise IOError('unrecognized output from ps: %s' % ps_call)
Example #7
def pyflakes_issues(base_path=DEFAULT_TARGET):
    """
  Checks for issues via pyflakes. False positives can be whitelisted via our
  test configuration.

  :param str base_path: directory to be iterated over

  :returns: dict of the form ``path => [(line_number, message)...]``
  """

    global PYFLAKES_IGNORE

    if PYFLAKES_IGNORE is None:
        pyflakes_ignore = {}

        for line in CONFIG["pyflakes.ignore"]:
            path, issue = line.split("=>")
            pyflakes_ignore.setdefault(path.strip(), []).append(issue.strip())

        PYFLAKES_IGNORE = pyflakes_ignore

    # Pyflakes issues are of the form...
    #
    #   FILE:LINE: ISSUE
    #
    # ... for instance...
    #
    #   stem/prereq.py:73: 'long_to_bytes' imported but unused
    #   stem/control.py:957: undefined name 'entry'

    issues = {}
    pyflakes_output = system.call("pyflakes %s" % base_path)

    for line in pyflakes_output:
        line_match = re.match(r"^(.*):(\d+): (.*)$", line)

        if line_match:
            path, line, issue = line_match.groups()

            if not _is_test_data(path) and not issue in PYFLAKES_IGNORE.get(
                    path, []):
                issues.setdefault(path, []).append((int(line), issue))

    return issues
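
The whitelist lookup is what distinguishes this from the pep8 check, so here is a tiny self-contained sketch of just that piece; the entries below are illustrative stand-ins for what CONFIG["pyflakes.ignore"] would provide:

# stand-in entries in the same 'path => issue' form the config uses
ignore_lines = [
  "stem/prereq.py => 'long_to_bytes' imported but unused",
  "stem/control.py => undefined name 'entry'",
]

pyflakes_ignore = {}

for entry in ignore_lines:
  path, issue = entry.split('=>')
  pyflakes_ignore.setdefault(path.strip(), []).append(issue.strip())

def is_reported(path, issue):
  # an issue is dropped only when it's whitelisted for that exact file
  return issue not in pyflakes_ignore.get(path, [])

print(is_reported('stem/prereq.py', "'long_to_bytes' imported but unused"))  # False
print(is_reported('stem/control.py', "redefinition of unused 'foo'"))        # True
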
Example #8
def pyflakes_issues(base_path = DEFAULT_TARGET):
  """
  Checks for issues via pyflakes. False positives can be whitelisted via our
  test configuration.

  :param str base_path: directory to be iterated over

  :returns: dict of the form ``path => [(line_number, message)...]``
  """

  global PYFLAKES_IGNORE

  if PYFLAKES_IGNORE is None:
    pyflakes_ignore = {}

    for line in CONFIG["pyflakes.ignore"]:
      path, issue = line.split("=>")
      pyflakes_ignore.setdefault(path.strip(), []).append(issue.strip())

    PYFLAKES_IGNORE = pyflakes_ignore

  # Pyflakes issues are of the form...
  #
  #   FILE:LINE: ISSUE
  #
  # ... for instance...
  #
  #   stem/prereq.py:73: 'long_to_bytes' imported but unused
  #   stem/control.py:957: undefined name 'entry'

  issues = {}
  pyflakes_output = system.call("pyflakes %s" % base_path)

  for line in pyflakes_output:
    line_match = re.match(r"^(.*):(\d+): (.*)$", line)

    if line_match:
      path, line, issue = line_match.groups()

      if not issue in PYFLAKES_IGNORE.get(path, []):
        issues.setdefault(path, []).append((int(line), issue))

  return issues
Example #9
def _resolveViaHost(ipAddr):
    """
  Performs a host lookup for the given IP, returning the resolved hostname.
  This raises an IOError if the lookup fails (os or network issue), and a
  ValueError in the case of DNS errors (address is unresolvable).
  
  Arguments:
    ipAddr - ip address to be resolved
  """

    hostname = system.call("host %s" % ipAddr)[0].split()[-1:][0]

    if hostname == "reached":
        # got message: ";; connection timed out; no servers could be reached"
        raise IOError("lookup timed out")
    elif hostname in DNS_ERROR_CODES:
        # got error response (can't do resolution on address)
        raise ValueError("address is unresolvable: %s" % hostname)
    else:
        # strips off ending period and returns hostname
        return hostname[:-1]
Example #10
def _resolveViaHost(ipAddr):
  """
  Performs a host lookup for the given IP, returning the resolved hostname.
  This raises an IOError if the lookup fails (os or network issue), and a
  ValueError in the case of DNS errors (address is unresolvable).
  
  Arguments:
    ipAddr - ip address to be resolved
  """
  
  hostname = system.call("host %s" % ipAddr)[0].split()[-1:][0]
  
  if hostname == "reached":
    # got message: ";; connection timed out; no servers could be reached"
    raise IOError("lookup timed out")
  elif hostname in DNS_ERROR_CODES:
    # got error response (can't do resolution on address)
    raise ValueError("address is unresolvable: %s" % hostname)
  else:
    # strips off ending period and returns hostname
    return hostname[:-1]
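
For comparison, the same reverse lookup can be done without shelling out; this is a hedged sketch using the standard library's socket module rather than the 'host' command (behaviour differs slightly: a timeout surfaces as a socket error instead of the 'reached' token checked above):

import socket

def resolve_via_socket(ip_addr):
  try:
    # gethostbyaddr returns (hostname, aliaslist, ipaddrlist); we only need the name
    hostname, _, _ = socket.gethostbyaddr(ip_addr)
    return hostname
  except socket.herror as exc:
    # unresolvable address, mirroring the ValueError above
    raise ValueError('address is unresolvable: %s' % exc)
  except socket.gaierror as exc:
    # os or network level failure, mirroring the IOError above
    raise IOError(str(exc))
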
Example #11
 def prepopulateFromState(self):
   """
   Attempts to use tor's state file to prepopulate values for the 15 minute
   interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
   returns True if successful and False otherwise.
   """
   
   # checks that this is a relay (if ORPort is unset, then skip)
   conn = torTools.getConn()
   orPort = conn.getOption("ORPort", None)
   if orPort == "0": return
   
   # gets the uptime (using the same parameters as the header panel to take
   # advantage of caching)
   # TODO: stem dropped system caching support so we'll need to think of
   # something else
   uptime = None
   queryPid = conn.getMyPid()
   if queryPid:
     queryParam = ["%cpu", "rss", "%mem", "etime"]
     queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
     psCall = system.call(queryCmd, None)
     
     if psCall and len(psCall) == 2:
       stats = psCall[1].strip().split()
       if len(stats) == 4: uptime = stats[3]
   
   # checks if tor has been running for at least a day, the reason being that
   # the state tracks a day's worth of data and this should only prepopulate
   # results associated with this tor instance
   if not uptime or not "-" in uptime:
     msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
     log.notice(msg)
     return False
   
   # get the user's data directory (usually '~/.tor')
   dataDir = conn.getOption("DataDirectory", None)
   if not dataDir:
     msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
     log.notice(msg)
     return False
   
   # attempt to open the state file
   try: stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir), "r")
   except IOError:
     msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
     log.notice(msg)
     return False
   
   # get the BWHistory entries (ordered oldest to newest) and number of
   # intervals since last recorded
   bwReadEntries, bwWriteEntries = None, None
   missingReadEntries, missingWriteEntries = None, None
   lastReadTime, lastWriteTime = None, None
   
   # converts from gmt to local with respect to DST
   tz_offset = time.altzone if time.localtime()[8] else time.timezone
   
   for line in stateFile:
     line = line.strip()
     
     # According to the rep_hist_update_state() function the BWHistory*Ends
     # correspond to the start of the following sampling period. Also, the
     # most recent values of BWHistory*Values appear to be an incremental
     # counter for the current sampling period. Hence, offsets are added to
     # account for both.
     
     if line.startswith("BWHistoryReadValues"):
       bwReadEntries = line[20:].split(",")
       bwReadEntries = [int(entry) / 1024.0 / 900 for entry in bwReadEntries]
       bwReadEntries.pop()
     elif line.startswith("BWHistoryWriteValues"):
       bwWriteEntries = line[21:].split(",")
       bwWriteEntries = [int(entry) / 1024.0 / 900 for entry in bwWriteEntries]
       bwWriteEntries.pop()
     elif line.startswith("BWHistoryReadEnds"):
       lastReadTime = time.mktime(time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastReadTime -= 900
       missingReadEntries = int((time.time() - lastReadTime) / 900)
     elif line.startswith("BWHistoryWriteEnds"):
       lastWriteTime = time.mktime(time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastWriteTime -= 900
       missingWriteEntries = int((time.time() - lastWriteTime) / 900)
   
   if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
     msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
     log.notice(msg)
     return False
   
   # fills missing entries with the last value
   bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
   bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries
   
   # crops starting entries so they're the same size
   entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
   bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
   bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]
   
   # gets index for 15-minute interval
   intervalIndex = 0
   for indexEntry in graphPanel.UPDATE_INTERVALS:
     if indexEntry[1] == 900: break
     else: intervalIndex += 1
   
   # fills the graphing parameters with state information
   for i in range(entryCount):
     readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]
     
     self.lastPrimary, self.lastSecondary = readVal, writeVal
     
     self.prepopulatePrimaryTotal += readVal * 900
     self.prepopulateSecondaryTotal += writeVal * 900
     self.prepopulateTicks += 900
     
     self.primaryCounts[intervalIndex].insert(0, readVal)
     self.secondaryCounts[intervalIndex].insert(0, writeVal)
   
   self.maxPrimary[intervalIndex] = max(self.primaryCounts)
   self.maxSecondary[intervalIndex] = max(self.secondaryCounts)
   del self.primaryCounts[intervalIndex][self.maxCol + 1:]
   del self.secondaryCounts[intervalIndex][self.maxCol + 1:]
   
   msg = PREPOPULATE_SUCCESS_MSG
   missingSec = time.time() - min(lastReadTime, lastWriteTime)
   if missingSec: msg += " (%s is missing)" % str_tools.get_time_label(missingSec, 0, True)
   log.notice(msg)
   
   return True
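
To make the unit handling above concrete, here is a tiny standalone sketch of the BWHistoryReadValues conversion (the sample line is made up; real entries come from tor's state file and are byte totals per 900 second interval):

sample = "BWHistoryReadValues 921600,1843200,460800"

# same conversion as above: bytes per 15 minute interval -> average KB/s
entries = [int(value) / 1024.0 / 900 for value in sample[20:].split(",")]

# the newest entry is the still-running interval's partial count, so drop it
entries.pop()

print(entries)  # [1.0, 2.0]
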
Example #12
 """
 
 if resolutionCmd == Resolver.PROC:
   # Attempts resolution via checking the proc contents.
   if not processPid:
     raise ValueError("proc resolution requires a pid")
   
   try:
     return proc.get_connections(processPid)
   except Exception, exc:
     raise IOError(str(exc))
 else:
   # Queries a resolution utility (netstat, lsof, etc). This raises an
   # IOError if the command fails or isn't available.
   cmd, cmd_filter = getResolverCommand(resolutionCmd, processName, processPid)
   results = system.call(cmd)
   results = filter(cmd_filter, results)
   
   if not results: raise IOError("No results found using: %s" % cmd)
   
   # parses results for the resolution command
   conn = []
   for line in results:
     if resolutionCmd == Resolver.LSOF:
       # Different versions of lsof have different numbers of columns, so
       # stripping off the optional 'established' entry so we can just use
       # the last one.
       comp = line.replace("(ESTABLISHED)", "").strip().split()
     else: comp = line.split()
     
     if resolutionCmd == Resolver.NETSTAT:
Example #13
def loadOptionDescriptions(loadPath=None, checkVersion=True):
    """
  Fetches and parses descriptions for tor's configuration options from its man
  page. This can be a somewhat lengthy call, and raises an IOError if issues
  occur. When successfully loading from a file, this returns the version of the
  contents loaded.
  
  If available, this can load the configuration descriptions from a file where
  they were previously persisted to cut down on the load time (latency for this
  is around 200ms).
  
  Arguments:
    loadPath     - if set, this attempts to fetch the configuration
                   descriptions from the given path instead of the man page
    checkVersion - discards the results if true and tor's version doesn't
                   match the cached descriptors, otherwise accepts anyway
  """

    CONFIG_DESCRIPTIONS_LOCK.acquire()
    CONFIG_DESCRIPTIONS.clear()

    raisedExc = None
    loadedVersion = ""
    try:
        if loadPath:
            # Input file is expected to be of the form:
            # <option>
            # <arg description>
            # <description, possibly multiple lines>
            # <PERSIST_ENTRY_DIVIDER>
            inputFile = open(loadPath, "r")
            inputFileContents = inputFile.readlines()
            inputFile.close()

            try:
                versionLine = inputFileContents.pop(0).rstrip()

                if versionLine.startswith("Tor Version "):
                    fileVersion = versionLine[12:]
                    loadedVersion = fileVersion
                    torVersion = torTools.getConn().getInfo("version", "")

                    if checkVersion and fileVersion != torVersion:
                        msg = "wrong version, tor is %s but the file's from %s" % (
                            torVersion, fileVersion)
                        raise IOError(msg)
                else:
                    raise IOError("unable to parse version")

                while inputFileContents:
                    # gets category enum, failing if it doesn't exist
                    category = inputFileContents.pop(0).rstrip()
                    if not category in Category:
                        baseMsg = "invalid category in input file: '%s'"
                        raise IOError(baseMsg % category)

                    # gets the position in the man page
                    indexArg, indexStr = -1, inputFileContents.pop(0).rstrip()

                    if indexStr.startswith("index: "):
                        indexStr = indexStr[7:]

                        if indexStr.isdigit(): indexArg = int(indexStr)
                        else:
                            raise IOError("non-numeric index value: %s" %
                                          indexStr)
                    else:
                        raise IOError("malformed index argument: %s" %
                                      indexStr)

                    option = inputFileContents.pop(0).rstrip()
                    argument = inputFileContents.pop(0).rstrip()

                    description, loadedLine = "", inputFileContents.pop(0)
                    while loadedLine != PERSIST_ENTRY_DIVIDER:
                        description += loadedLine

                        if inputFileContents:
                            loadedLine = inputFileContents.pop(0)
                        else:
                            break

                    CONFIG_DESCRIPTIONS[option.lower()] = ManPageEntry(
                        option, indexArg, category, argument,
                        description.rstrip())
            except IndexError:
                CONFIG_DESCRIPTIONS.clear()
                raise IOError("input file format is invalid")
        else:
            manCallResults = system.call("man tor")

            if not manCallResults:
                raise IOError("man page not found")

            # Fetches all options available with this tor instance. This isn't
            # vital, and the validOptions are left empty if the call fails.
            conn, validOptions = torTools.getConn(), []
            configOptionQuery = conn.getInfo("config/names", None)
            if configOptionQuery:
                for line in configOptionQuery.strip().split("\n"):
                    validOptions.append(line[:line.find(" ")].lower())

            optionCount, lastOption, lastArg = 0, None, None
            lastCategory, lastDescription = Category.GENERAL, ""
            for line in manCallResults:
                line = uiTools.getPrintable(line)
                strippedLine = line.strip()

                # we have content, but an indent less than an option (ignore line)
                #if strippedLine and not line.startswith(" " * MAN_OPT_INDENT): continue

                # line starts with an indent equivalent to a new config option
                isOptIndent = line.startswith(
                    " " * MAN_OPT_INDENT) and line[MAN_OPT_INDENT] != " "

                isCategoryLine = not line.startswith(" ") and "OPTIONS" in line

                # if this is a category header or a new option, add an entry using the
                # buffered results
                if isOptIndent or isCategoryLine:
                    # Filters the line based on if the option is recognized by tor or
                    # not. This isn't necessary for arm, so if unable to make the check
                    # then we skip filtering (no loss, the map will just have some extra
                    # noise).
                    strippedDescription = lastDescription.strip()
                    if lastOption and (not validOptions
                                       or lastOption.lower() in validOptions):
                        CONFIG_DESCRIPTIONS[lastOption.lower()] = ManPageEntry(
                            lastOption, optionCount, lastCategory, lastArg,
                            strippedDescription)
                        optionCount += 1
                    lastDescription = ""

                    # parses the option and argument
                    line = line.strip()
                    divIndex = line.find(" ")
                    if divIndex != -1:
                        lastOption, lastArg = line[:divIndex], line[divIndex +
                                                                    1:]

                    # if this is a category header then switch it
                    if isCategoryLine:
                        if line.startswith("OPTIONS"):
                            lastCategory = Category.GENERAL
                        elif line.startswith("CLIENT"):
                            lastCategory = Category.CLIENT
                        elif line.startswith("SERVER"):
                            lastCategory = Category.RELAY
                        elif line.startswith("DIRECTORY SERVER"):
                            lastCategory = Category.DIRECTORY
                        elif line.startswith("DIRECTORY AUTHORITY SERVER"):
                            lastCategory = Category.AUTHORITY
                        elif line.startswith("HIDDEN SERVICE"):
                            lastCategory = Category.HIDDEN_SERVICE
                        elif line.startswith("TESTING NETWORK"):
                            lastCategory = Category.TESTING
                        else:
                            log.notice(
                                "Unrecognized category in the man page: %s" %
                                line.strip())
                else:
                    # Appends the text to the running description. Empty lines and lines
                    # starting with a specific indentation are used for formatting, for
                    # instance the ExitPolicy and TestingTorNetwork entries.
                    if lastDescription and lastDescription[-1] != "\n":
                        lastDescription += " "

                    if not strippedLine:
                        lastDescription += "\n\n"
                    elif line.startswith(" " * MAN_EX_INDENT):
                        lastDescription += "    %s\n" % strippedLine
                    else:
                        lastDescription += strippedLine
    except IOError, exc:
        raisedExc = exc
Example #14
        return names
      else:
        return []

    test.output.print_noline("  copying stem to '%s'... " % python3_destination, *test.runner.STATUS_ATTR)
    shutil.copytree('stem', os.path.join(python3_destination, 'stem'))
    shutil.copytree('test', os.path.join(python3_destination, 'test'), ignore = _ignore)
    shutil.copy('run_tests.py', os.path.join(python3_destination, 'run_tests.py'))
    test.output.print_line("done", *test.runner.STATUS_ATTR)
  except OSError, exc:
    test.output.print_line("failed\n%s" % exc, *test.runner.ERROR_ATTR)
    return False

  try:
    test.output.print_noline("  running 2to3... ", *test.runner.STATUS_ATTR)
    system.call("2to3 --write --nobackups --no-diffs %s" % python3_destination)
    test.output.print_line("done", *test.runner.STATUS_ATTR)
  except OSError, exc:
    test.output.print_line("failed\n%s" % exc, *test.runner.ERROR_ATTR)
    return False

  return True


def _print_style_issues():
  base_path = os.path.sep.join(__file__.split(os.path.sep)[:-1]).lstrip("./")
  style_issues = test.static_checks.get_issues(os.path.join(base_path, "stem"))
  style_issues.update(test.static_checks.get_issues(os.path.join(base_path, "test")))
  style_issues.update(test.static_checks.get_issues(os.path.join(base_path, "run_tests.py")))

  # If we're doing some sort of testing (unit or integ) and pyflakes is
Example #15
def _process_for_ports(local_ports, remote_ports):
    """
  Provides the name of the process using the given ports.

  :param list local_ports: local port numbers to look up
  :param list remote_ports: remote port numbers to look up

  :returns: **dict** mapping the ports to the associated **Process**, or
    **None** if it can't be determined

  :raises: **IOError** if unsuccessful
  """
    def _parse_lsof_line(line):
        line_comp = line.split()

        if not line:
            return None, None, None, None  # blank line
        elif len(line_comp) != 10:
            raise ValueError('lines are expected to have ten fields: %s' %
                             line)
        elif line_comp[9] != '(ESTABLISHED)':
            return None, None, None, None  # connection isn't established
        elif not line_comp[1].isdigit():
            raise ValueError(
                'expected the pid (which is the second value) to be an integer: %s'
                % line)

        pid = int(line_comp[1])
        cmd = line_comp[0]
        port_map = line_comp[8]

        if '->' not in port_map:
            raise ValueError(
                "'%s' is expected to be a '->' separated mapping" % port_map)

        local, remote = port_map.split('->', 1)

        if ':' not in local or ':' not in remote:
            raise ValueError("'%s' is expected to be 'address:port' entries" %
                             port_map)

        local_port = local.split(':', 1)[1]
        remote_port = remote.split(':', 1)[1]

        if not connection.is_valid_port(local_port):
            raise ValueError("'%s' isn't a valid port" % local_port)
        elif not connection.is_valid_port(remote_port):
            raise ValueError("'%s' isn't a valid port" % remote_port)

        return int(local_port), int(remote_port), pid, cmd

    # atagar@fenrir:~/Desktop/nyx$ lsof -i tcp:51849 -i tcp:37277
    # COMMAND  PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
    # tor     2001 atagar   14u  IPv4  14048      0t0  TCP localhost:9051->localhost:37277 (ESTABLISHED)
    # tor     2001 atagar   15u  IPv4  22024      0t0  TCP localhost:9051->localhost:51849 (ESTABLISHED)
    # python  2462 atagar    3u  IPv4  14047      0t0  TCP localhost:37277->localhost:9051 (ESTABLISHED)
    # python  3444 atagar    3u  IPv4  22023      0t0  TCP localhost:51849->localhost:9051 (ESTABLISHED)

    try:
        lsof_cmd = 'lsof -nP ' + ' '.join(
            ['-i tcp:%s' % port for port in (local_ports + remote_ports)])
        lsof_call = system.call(lsof_cmd)
    except OSError as exc:
        raise IOError(exc)

    if lsof_call:
        results = {}

        if lsof_call[0].startswith('COMMAND'):
            lsof_call = lsof_call[1:]  # strip the title line

        for line in lsof_call:
            try:
                local_port, remote_port, pid, cmd = _parse_lsof_line(line)

                if local_port in local_ports:
                    results[local_port] = Process(pid, cmd)
                elif remote_port in remote_ports:
                    results[remote_port] = Process(pid, cmd)
            except ValueError as exc:
                raise IOError('unrecognized output from lsof (%s): %s' %
                              (exc, line))

        for unknown_port in set(local_ports).union(remote_ports).difference(
                results.keys()):
            results[unknown_port] = None

        return results

    raise IOError('no results from lsof')
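
A small self-contained sketch of the per-line parse, run against one of the sample lsof lines from the comment above (no lsof call is made here):

sample = ('tor     2001 atagar   14u  IPv4  14048      0t0  TCP '
          'localhost:9051->localhost:37277 (ESTABLISHED)')

fields = sample.split()
cmd, pid, port_map = fields[0], int(fields[1]), fields[8]

# the mapping is 'address:port->address:port', so split it and take each port
local, remote = port_map.split('->', 1)
local_port = int(local.split(':', 1)[1])
remote_port = int(remote.split(':', 1)[1])

print(cmd, pid, local_port, remote_port)  # tor 2001 9051 37277
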
Example #16
 def _queryApplications(self, ports=[]):
   """
   Performs an lsof lookup on the given ports to get the command/pid tuples.
   
   Arguments:
     ports - list of ports to be resolved to applications
   """
   
   # atagar@fenrir:~/Desktop/arm$ lsof -i tcp:51849 -i tcp:37277
   # COMMAND  PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
   # tor     2001 atagar   14u  IPv4  14048      0t0  TCP localhost:9051->localhost:37277 (ESTABLISHED)
   # tor     2001 atagar   15u  IPv4  22024      0t0  TCP localhost:9051->localhost:51849 (ESTABLISHED)
   # python  2462 atagar    3u  IPv4  14047      0t0  TCP localhost:37277->localhost:9051 (ESTABLISHED)
   # python  3444 atagar    3u  IPv4  22023      0t0  TCP localhost:51849->localhost:9051 (ESTABLISHED)
   
   if not ports:
     self.resultsLock.acquire()
     self.queryResults = {}
     self.isResolving = False
     self.resultsLock.release()
     
     # wakes threads waiting on results
     self._cond.acquire()
     self._cond.notifyAll()
     self._cond.release()
     
     return
   
   results = {}
   lsofArgs = []
   
   # Uses results from the last query if we have any, otherwise appends the
   # port to the lsof command. This has the potential for persisting dirty
   # results but if we're querying by the dynamic port on the local tcp
   # connections then this should be very rare (and definitely worth the
   # chance of being able to skip an lsof query altogether).
   for port in ports:
     if port in self.queryResults:
       results[port] = self.queryResults[port]
     else: lsofArgs.append("-i tcp:%s" % port)
   
   if lsofArgs:
     lsofResults = system.call("lsof -nP " + " ".join(lsofArgs))
   else: lsofResults = None
   
   if not lsofResults and self.failureCount != -1:
     # lsof query failed and we aren't yet sure if it's possible to
     # successfully get results on this platform
     self.failureCount += 1
     self.isResolving = False
     return
   elif lsofResults:
     # (iPort, oPort) tuple for our own process, if it was fetched
     ourConnection = None
     
     for line in lsofResults:
       lineComp = line.split()
       
       if len(lineComp) == 10 and lineComp[9] == "(ESTABLISHED)":
         cmd, pid, _, _, _, _, _, _, portMap, _ = lineComp
         
         if "->" in portMap:
           iPort, oPort = portMap.split("->")
           iPort = iPort.split(":")[1]
           oPort = oPort.split(":")[1]
           
           # entry belongs to our own process
           if pid == str(os.getpid()):
             cmd = self.scriptName
             ourConnection = (iPort, oPort)
           
           if iPort.isdigit() and oPort.isdigit():
             newEntry = (iPort, oPort, cmd, pid)
             
             # adds the entry under the key of whatever we queried it with
             # (this might be both the inbound _and_ outbound ports)
             for portMatch in (iPort, oPort):
               if portMatch in ports:
                 if portMatch in results:
                   results[portMatch].append(newEntry)
                 else: results[portMatch] = [newEntry]
     
     # making the lsof call generated an extraneous sh entry for our own connection
     if ourConnection:
       for ourPort in ourConnection:
         if ourPort in results:
           shIndex = None
           
           for i in range(len(results[ourPort])):
             if results[ourPort][i][2] == "sh":
               shIndex = i
               break
           
           if shIndex != None:
             del results[ourPort][shIndex]
   
   self.resultsLock.acquire()
   self.failureCount = -1
   self.queryResults = results
   self.isResolving = False
   self.resultsLock.release()
   
   # wakes threads waiting on results
   self._cond.acquire()
   self._cond.notifyAll()
   self._cond.release()
Example #17
def _process_for_ports(local_ports, remote_ports):
  """
  Provides the name of the process using the given ports.

  :param list local_ports: local port numbers to look up
  :param list remote_ports: remote port numbers to look up

  :returns: **dict** mapping the ports to the associated **Process**, or
    **None** if it can't be determined

  :raises: **IOError** if unsuccessful
  """

  def _parse_lsof_line(line):
    line_comp = line.split()

    if not line:
      return None, None, None, None  # blank line
    elif len(line_comp) != 10:
      raise ValueError('lines are expected to have ten fields: %s' % line)
    elif line_comp[9] != '(ESTABLISHED)':
      return None, None, None, None  # connection isn't established
    elif not line_comp[1].isdigit():
      raise ValueError('expected the pid (which is the second value) to be an integer: %s' % line)

    pid = int(line_comp[1])
    cmd = line_comp[0]
    port_map = line_comp[8]

    if '->' not in port_map:
      raise ValueError("'%s' is expected to be a '->' separated mapping" % port_map)

    local, remote = port_map.split('->', 1)

    if ':' not in local or ':' not in remote:
      raise ValueError("'%s' is expected to be 'address:port' entries" % port_map)

    local_port = local.split(':', 1)[1]
    remote_port = remote.split(':', 1)[1]

    if not connection.is_valid_port(local_port):
      raise ValueError("'%s' isn't a valid port" % local_port)
    elif not connection.is_valid_port(remote_port):
      raise ValueError("'%s' isn't a valid port" % remote_port)

    return int(local_port), int(remote_port), pid, cmd

  # atagar@fenrir:~/Desktop/nyx$ lsof -i tcp:51849 -i tcp:37277
  # COMMAND  PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
  # tor     2001 atagar   14u  IPv4  14048      0t0  TCP localhost:9051->localhost:37277 (ESTABLISHED)
  # tor     2001 atagar   15u  IPv4  22024      0t0  TCP localhost:9051->localhost:51849 (ESTABLISHED)
  # python  2462 atagar    3u  IPv4  14047      0t0  TCP localhost:37277->localhost:9051 (ESTABLISHED)
  # python  3444 atagar    3u  IPv4  22023      0t0  TCP localhost:51849->localhost:9051 (ESTABLISHED)

  try:
    lsof_cmd = 'lsof -nP ' + ' '.join(['-i tcp:%s' % port for port in (local_ports + remote_ports)])
    lsof_call = system.call(lsof_cmd)
  except OSError as exc:
    raise IOError(exc)

  if lsof_call:
    results = {}

    if lsof_call[0].startswith('COMMAND  '):
      lsof_call = lsof_call[1:]  # strip the title line

    for line in lsof_call:
      try:
        local_port, remote_port, pid, cmd = _parse_lsof_line(line)

        if local_port in local_ports:
          results[local_port] = Process(pid, cmd)
        elif remote_port in remote_ports:
          results[remote_port] = Process(pid, cmd)
      except ValueError as exc:
        raise IOError('unrecognized output from lsof (%s): %s' % (exc, line))

    for unknown_port in set(local_ports).union(remote_ports).difference(results.keys()):
      results[unknown_port] = None

    return results

  raise IOError('no results from lsof')
Example #18
    def prepopulateFromState(self):
        """
    Attempts to use tor's state file to prepopulate values for the 15 minute
    interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
    returns True if successful and False otherwise.
    """

        # checks that this is a relay (if ORPort is unset, then skip)
        conn = torTools.getConn()
        orPort = conn.getOption("ORPort", None)
        if orPort == "0": return

        # gets the uptime (using the same parameters as the header panel to take
        # advantage of caching)
        # TODO: stem dropped system caching support so we'll need to think of
        # something else
        uptime = None
        queryPid = conn.getMyPid()
        if queryPid:
            queryParam = ["%cpu", "rss", "%mem", "etime"]
            queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
            psCall = system.call(queryCmd, None)

            if psCall and len(psCall) == 2:
                stats = psCall[1].strip().split()
                if len(stats) == 4: uptime = stats[3]

        # checks if tor has been running for at least a day, the reason being that
        # the state tracks a day's worth of data and this should only prepopulate
        # results associated with this tor instance
        if not uptime or not "-" in uptime:
            msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
            log.notice(msg)
            return False

        # get the user's data directory (usually '~/.tor')
        dataDir = conn.getOption("DataDirectory", None)
        if not dataDir:
            msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
            log.notice(msg)
            return False

        # attempt to open the state file
        try:
            stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir),
                             "r")
        except IOError:
            msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
            log.notice(msg)
            return False

        # get the BWHistory entries (ordered oldest to newest) and number of
        # intervals since last recorded
        bwReadEntries, bwWriteEntries = None, None
        missingReadEntries, missingWriteEntries = None, None
        lastReadTime, lastWriteTime = None, None

        # converts from gmt to local with respect to DST
        tz_offset = time.altzone if time.localtime()[8] else time.timezone

        for line in stateFile:
            line = line.strip()

            # According to the rep_hist_update_state() function the BWHistory*Ends
            # correspond to the start of the following sampling period. Also, the
            # most recent values of BWHistory*Values appear to be an incremental
            # counter for the current sampling period. Hence, offsets are added to
            # account for both.

            if line.startswith("BWHistoryReadValues"):
                bwReadEntries = line[20:].split(",")
                bwReadEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwReadEntries
                ]
                bwReadEntries.pop()
            elif line.startswith("BWHistoryWriteValues"):
                bwWriteEntries = line[21:].split(",")
                bwWriteEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwWriteEntries
                ]
                bwWriteEntries.pop()
            elif line.startswith("BWHistoryReadEnds"):
                lastReadTime = time.mktime(
                    time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastReadTime -= 900
                missingReadEntries = int((time.time() - lastReadTime) / 900)
            elif line.startswith("BWHistoryWriteEnds"):
                lastWriteTime = time.mktime(
                    time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastWriteTime -= 900
                missingWriteEntries = int((time.time() - lastWriteTime) / 900)

        if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
            msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
            log.notice(msg)
            return False

        # fills missing entries with the last value
        bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
        bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries

        # crops starting entries so they're the same size
        entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
        bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
        bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]

        # gets index for 15-minute interval
        intervalIndex = 0
        for indexEntry in graphPanel.UPDATE_INTERVALS:
            if indexEntry[1] == 900: break
            else: intervalIndex += 1

        # fills the graphing parameters with state information
        for i in range(entryCount):
            readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]

            self.lastPrimary, self.lastSecondary = readVal, writeVal

            self.prepopulatePrimaryTotal += readVal * 900
            self.prepopulateSecondaryTotal += writeVal * 900
            self.prepopulateTicks += 900

            self.primaryCounts[intervalIndex].insert(0, readVal)
            self.secondaryCounts[intervalIndex].insert(0, writeVal)

        self.maxPrimary[intervalIndex] = max(self.primaryCounts)
        self.maxSecondary[intervalIndex] = max(self.secondaryCounts)
        del self.primaryCounts[intervalIndex][self.maxCol + 1:]
        del self.secondaryCounts[intervalIndex][self.maxCol + 1:]

        msg = PREPOPULATE_SUCCESS_MSG
        missingSec = time.time() - min(lastReadTime, lastWriteTime)
        if missingSec:
            msg += " (%s is missing)" % str_tools.get_time_label(
                missingSec, 0, True)
        log.notice(msg)

        return True
Example #19
    processName, raisedExc = "", None

    # fetch it from proc contents if available
    if proc.is_available():
        try:
            processName = proc.get_stats(pid, proc.Stat.COMMAND)[0]
        except IOError, exc:
            raisedExc = exc

    # fall back to querying via ps
    if not processName:
        # the ps call formats results as:
        # COMMAND
        # tor
        psCall = system.call("ps -p %s -o command" % pid)

        if psCall and len(psCall) >= 2 and not " " in psCall[1]:
            processName, raisedExc = psCall[1].strip(), None
        else:
            raisedExc = ValueError("Unexpected output from ps: %s" % psCall)

    if raisedExc:
        if default == None: raise raisedExc
        else:
            if cacheFailure:
                PROCESS_NAME_CACHE[pid] = default

            return default
    else:
        processName = os.path.basename(processName)
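
As a standalone illustration of the ps fallback (subprocess in place of stem's system.call, with os.getpid() as a stand-in pid):

import os
import subprocess

pid = os.getpid()

# same query as above: the first output line is the COMMAND header
ps_call = subprocess.run(['ps', '-p', str(pid), '-o', 'command'],
                         capture_output=True, text=True).stdout.splitlines()

if len(ps_call) >= 2:
  # take the executable (the first token) and strip its directory, e.g. 'python3'
  process_name = os.path.basename(ps_call[1].strip().split()[0])
  print(process_name)
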
Example #20
def loadOptionDescriptions(loadPath = None, checkVersion = True):
  """
  Fetches and parses descriptions for tor's configuration options from its man
  page. This can be a somewhat lengthy call, and raises an IOError if issues
  occur. When successfully loading from a file, this returns the version of the
  contents loaded.
  
  If available, this can load the configuration descriptions from a file where
  they were previously persisted to cut down on the load time (latency for this
  is around 200ms).
  
  Arguments:
    loadPath     - if set, this attempts to fetch the configuration
                   descriptions from the given path instead of the man page
    checkVersion - discards the results if true and tor's version doesn't
                   match the cached descriptors, otherwise accepts anyway
  """
  
  CONFIG_DESCRIPTIONS_LOCK.acquire()
  CONFIG_DESCRIPTIONS.clear()
  
  raisedExc = None
  loadedVersion = ""
  try:
    if loadPath:
      # Input file is expected to be of the form:
      # <option>
      # <arg description>
      # <description, possibly multiple lines>
      # <PERSIST_ENTRY_DIVIDER>
      inputFile = open(loadPath, "r")
      inputFileContents = inputFile.readlines()
      inputFile.close()
      
      try:
        versionLine = inputFileContents.pop(0).rstrip()
        
        if versionLine.startswith("Tor Version "):
          fileVersion = versionLine[12:]
          loadedVersion = fileVersion
          torVersion = torTools.getConn().getInfo("version", "")
          
          if checkVersion and fileVersion != torVersion:
            msg = "wrong version, tor is %s but the file's from %s" % (torVersion, fileVersion)
            raise IOError(msg)
        else:
          raise IOError("unable to parse version")
        
        while inputFileContents:
          # gets category enum, failing if it doesn't exist
          category = inputFileContents.pop(0).rstrip()
          if not category in Category:
            baseMsg = "invalid category in input file: '%s'"
            raise IOError(baseMsg % category)
          
          # gets the position in the man page
          indexArg, indexStr = -1, inputFileContents.pop(0).rstrip()
          
          if indexStr.startswith("index: "):
            indexStr = indexStr[7:]
            
            if indexStr.isdigit(): indexArg = int(indexStr)
            else: raise IOError("non-numeric index value: %s" % indexStr)
          else: raise IOError("malformed index argument: %s"% indexStr)
          
          option = inputFileContents.pop(0).rstrip()
          argument = inputFileContents.pop(0).rstrip()
          
          description, loadedLine = "", inputFileContents.pop(0)
          while loadedLine != PERSIST_ENTRY_DIVIDER:
            description += loadedLine
            
            if inputFileContents: loadedLine = inputFileContents.pop(0)
            else: break
          
          CONFIG_DESCRIPTIONS[option.lower()] = ManPageEntry(option, indexArg, category, argument, description.rstrip())
      except IndexError:
        CONFIG_DESCRIPTIONS.clear()
        raise IOError("input file format is invalid")
    else:
      manCallResults = system.call("man tor")
      
      if not manCallResults:
        raise IOError("man page not found")
      
      # Fetches all options available with this tor instance. This isn't
      # vital, and the validOptions are left empty if the call fails.
      conn, validOptions = torTools.getConn(), []
      configOptionQuery = conn.getInfo("config/names", None)
      if configOptionQuery:
        for line in configOptionQuery.strip().split("\n"):
          validOptions.append(line[:line.find(" ")].lower())
      
      optionCount, lastOption, lastArg = 0, None, None
      lastCategory, lastDescription = Category.GENERAL, ""
      for line in manCallResults:
        line = uiTools.getPrintable(line)
        strippedLine = line.strip()
        
        # we have content, but an indent less than an option (ignore line)
        #if strippedLine and not line.startswith(" " * MAN_OPT_INDENT): continue
        
        # line starts with an indent equivalent to a new config option
        isOptIndent = line.startswith(" " * MAN_OPT_INDENT) and line[MAN_OPT_INDENT] != " "
        
        isCategoryLine = not line.startswith(" ") and "OPTIONS" in line
        
        # if this is a category header or a new option, add an entry using the
        # buffered results
        if isOptIndent or isCategoryLine:
          # Filters the line based on if the option is recognized by tor or
          # not. This isn't necessary for arm, so if unable to make the check
          # then we skip filtering (no loss, the map will just have some extra
          # noise).
          strippedDescription = lastDescription.strip()
          if lastOption and (not validOptions or lastOption.lower() in validOptions):
            CONFIG_DESCRIPTIONS[lastOption.lower()] = ManPageEntry(lastOption, optionCount, lastCategory, lastArg, strippedDescription)
            optionCount += 1
          lastDescription = ""
          
          # parses the option and argument
          line = line.strip()
          divIndex = line.find(" ")
          if divIndex != -1:
            lastOption, lastArg = line[:divIndex], line[divIndex + 1:]
          
          # if this is a category header then switch it
          if isCategoryLine:
            if line.startswith("OPTIONS"): lastCategory = Category.GENERAL
            elif line.startswith("CLIENT"): lastCategory = Category.CLIENT
            elif line.startswith("SERVER"): lastCategory = Category.RELAY
            elif line.startswith("DIRECTORY SERVER"): lastCategory = Category.DIRECTORY
            elif line.startswith("DIRECTORY AUTHORITY SERVER"): lastCategory = Category.AUTHORITY
            elif line.startswith("HIDDEN SERVICE"): lastCategory = Category.HIDDEN_SERVICE
            elif line.startswith("TESTING NETWORK"): lastCategory = Category.TESTING
            else:
              log.notice("Unrecognized category in the man page: %s" % line.strip())
        else:
          # Appends the text to the running description. Empty lines and lines
          # starting with a specific indentation are used for formatting, for
          # instance the ExitPolicy and TestingTorNetwork entries.
          if lastDescription and lastDescription[-1] != "\n":
            lastDescription += " "
          
          if not strippedLine:
            lastDescription += "\n\n"
          elif line.startswith(" " * MAN_EX_INDENT):
            lastDescription += "    %s\n" % strippedLine
          else: lastDescription += strippedLine
  except IOError, exc:
    raisedExc = exc
Example #21
 def run(self):
   while not self._halt:
     timeSinceReset = time.time() - self.lastLookup
     
     if self.resolveRate == 0:
       self._cond.acquire()
       if not self._halt: self._cond.wait(0.2)
       self._cond.release()
       
       continue
     elif timeSinceReset < self.resolveRate:
       sleepTime = max(0.2, self.resolveRate - timeSinceReset)
       
       self._cond.acquire()
       if not self._halt: self._cond.wait(sleepTime)
       self._cond.release()
       
       continue # done waiting, try again
     
     newValues = {}
     try:
       if self._useProc:
         utime, stime, startTime = proc.get_stats(self.processPid, proc.Stat.CPU_UTIME, proc.Stat.CPU_STIME, proc.Stat.START_TIME)
         totalCpuTime = float(utime) + float(stime)
         cpuDelta = totalCpuTime - self._lastCpuTotal
         newValues["cpuSampling"] = cpuDelta / timeSinceReset
         newValues["cpuAvg"] = totalCpuTime / (time.time() - float(startTime))
         newValues["_lastCpuTotal"] = totalCpuTime
         
         memUsage = int(proc.get_memory_usage(self.processPid)[0])
         totalMemory = proc.get_physical_memory()
         newValues["memUsage"] = memUsage
         newValues["memUsagePercentage"] = float(memUsage) / totalMemory
       else:
         # the ps call formats results as:
         # 
         #     TIME     ELAPSED   RSS %MEM
         # 3-08:06:32 21-00:00:12 121844 23.5
         # 
         # or if Tor has only recently been started:
         # 
         #     TIME      ELAPSED    RSS %MEM
         #  0:04.40        37:57  18772  0.9
         
         psCall = system.call("ps -p %s -o cputime,etime,rss,%%mem" % self.processPid)
         
         isSuccessful = False
         if psCall and len(psCall) >= 2:
           stats = psCall[1].strip().split()
           
           if len(stats) == 4:
             try:
               totalCpuTime = str_tools.parse_short_time_label(stats[0])
               uptime = str_tools.parse_short_time_label(stats[1])
               cpuDelta = totalCpuTime - self._lastCpuTotal
               newValues["cpuSampling"] = cpuDelta / timeSinceReset
               newValues["cpuAvg"] = totalCpuTime / uptime
               newValues["_lastCpuTotal"] = totalCpuTime
               
               newValues["memUsage"] = int(stats[2]) * 1024 # ps size is in kb
               newValues["memUsagePercentage"] = float(stats[3]) / 100.0
               isSuccessful = True
             except ValueError, exc: pass
         
         if not isSuccessful:
           raise IOError("unrecognized output from ps: %s" % psCall)
     except IOError, exc:
       newValues = {}
       self._failureCount += 1
       
       if self._useProc:
         if self._failureCount >= 3:
           # We've failed three times resolving via proc. Warn, and fall back
           # to ps resolutions.
           log.info("Failed three attempts to get process resource usage from proc, falling back to ps (%s)" % exc)
           
           self._useProc = False
           self._failureCount = 1 # prevents lastQueryFailed() from thinking that we succeeded
         else:
           # wait a bit and try again
           log.debug("Unable to query process resource usage from proc (%s)" % exc)
           self._cond.acquire()
           if not self._halt: self._cond.wait(0.5)
           self._cond.release()
       else:
         # exponential backoff on making failed ps calls
         sleepTime = 0.01 * (2 ** self._failureCount) + self._failureCount
         log.debug("Unable to query process resource usage from ps, waiting %0.2f seconds (%s)" % (sleepTime, exc))
         self._cond.acquire()
         if not self._halt: self._cond.wait(sleepTime)
         self._cond.release()
     
     # sets the new values
     if newValues:
       # If this is the first run then the cpuSampling stat is meaningless
       # (there isn't a previous tick to sample from, so it's zero at this
       # point), so we set it to the average, which is a fairer estimate.
       if self.lastLookup == -1:
         newValues["cpuSampling"] = newValues["cpuAvg"]
       
       self._valLock.acquire()
       self.cpuSampling = newValues["cpuSampling"]
       self.cpuAvg = newValues["cpuAvg"]
       self.memUsage = newValues["memUsage"]
       self.memUsagePercentage = newValues["memUsagePercentage"]
       self._lastCpuTotal = newValues["_lastCpuTotal"]
       self.lastLookup = time.time()
       self._runCount += 1
       self._failureCount = 0
       self._valLock.release()
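
When proc contents aren't available, the sampler above shells out to "ps -p <pid> -o cputime,etime,rss,%mem" and parses the two-line output documented in its comments. Below is a self-contained sketch of that fallback path; the subprocess call and the time-label parser are simplified stand-ins for the project's system.call() and str_tools.parse_short_time_label() helpers, not their actual implementations.

import os
import subprocess

def parse_time_label(label):
  # Simplified stand-in for str_tools.parse_short_time_label(), converting
  # '[[dd-]hh:]mm:ss' style values from ps into seconds.
  days, _, label = label.rpartition("-")
  parts = [float(part) for part in label.split(":")]
  seconds = sum(value * 60 ** i for i, value in enumerate(reversed(parts)))
  return seconds + (int(days) * 86400 if days else 0)

def ps_resource_usage(pid):
  output = subprocess.check_output(
    ["ps", "-p", str(pid), "-o", "cputime,etime,rss,%mem"],
  ).decode().splitlines()

  if len(output) < 2:
    raise IOError("unrecognized output from ps: %s" % output)

  cputime, etime, rss, mem_percent = output[1].split()

  return {
    "total_cpu_time": parse_time_label(cputime),  # seconds of cpu time used
    "uptime": parse_time_label(etime),            # seconds since the process started
    "memory_bytes": int(rss) * 1024,              # ps reports rss in kilobytes
    "memory_percent": float(mem_percent) / 100.0,
  }

print(ps_resource_usage(os.getpid()))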
Example #22
0
 
 processName, raisedExc = "", None
 
 # fetch it from proc contents if available
 if proc.is_available():
   try:
     processName = proc.get_stats(pid, proc.Stat.COMMAND)[0]
   except IOError, exc:
     raisedExc = exc
 
 # fall back to querying via ps
 if not processName:
   # the ps call formats results as:
   # COMMAND
   # tor
   psCall = system.call("ps -p %s -o command" % pid)
   
   if psCall and len(psCall) >= 2 and " " not in psCall[1]:
     processName, raisedExc = psCall[1].strip(), None
   else:
     raisedExc = ValueError("Unexpected output from ps: %s" % psCall)
 
 if raisedExc:
   if default is None: raise raisedExc
   else:
     if cacheFailure:
       PROCESS_NAME_CACHE[pid] = default
     
     return default
 else:
   processName = os.path.basename(processName)
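
This example resolves a process name from proc contents and falls back to a "ps -p <pid> -o command" call. Here's a minimal standalone sketch of the same lookup order using only the standard library; reading /proc/<pid>/comm is an assumption standing in for the project's proc.get_stats() call.

import os
import subprocess

def process_name(pid):
  # Prefer proc contents when available, mirroring the lookup order above.
  # Reading /proc/<pid>/comm is an assumption standing in for proc.get_stats().
  comm_path = "/proc/%s/comm" % pid

  if os.path.exists(comm_path):
    with open(comm_path) as comm_file:
      return comm_file.read().strip()

  # Fall back to ps, whose output is a 'COMMAND' header followed by the command.
  output = subprocess.check_output(["ps", "-p", str(pid), "-o", "command"]).decode().splitlines()

  if len(output) < 2:
    raise ValueError("Unexpected output from ps: %s" % output)

  return os.path.basename(output[1].strip().split()[0])

print(process_name(os.getpid()))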
Example #23
0
    def run(self):
        while not self._halt:
            timeSinceReset = time.time() - self.lastLookup

            if self.resolveRate == 0:
                self._cond.acquire()
                if not self._halt: self._cond.wait(0.2)
                self._cond.release()

                continue
            elif timeSinceReset < self.resolveRate:
                sleepTime = max(0.2, self.resolveRate - timeSinceReset)

                self._cond.acquire()
                if not self._halt: self._cond.wait(sleepTime)
                self._cond.release()

                continue  # done waiting, try again

            newValues = {}
            try:
                if self._useProc:
                    utime, stime, startTime = proc.get_stats(
                        self.processPid, proc.Stat.CPU_UTIME,
                        proc.Stat.CPU_STIME, proc.Stat.START_TIME)
                    totalCpuTime = float(utime) + float(stime)
                    cpuDelta = totalCpuTime - self._lastCpuTotal
                    newValues["cpuSampling"] = cpuDelta / timeSinceReset
                    newValues["cpuAvg"] = totalCpuTime / (time.time() -
                                                          float(startTime))
                    newValues["_lastCpuTotal"] = totalCpuTime

                    memUsage = int(proc.get_memory_usage(self.processPid)[0])
                    totalMemory = proc.get_physical_memory()
                    newValues["memUsage"] = memUsage
                    newValues["memUsagePercentage"] = float(
                        memUsage) / totalMemory
                else:
                    # the ps call formats results as:
                    #
                    #     TIME     ELAPSED   RSS %MEM
                    # 3-08:06:32 21-00:00:12 121844 23.5
                    #
                    # or if Tor has only recently been started:
                    #
                    #     TIME      ELAPSED    RSS %MEM
                    #  0:04.40        37:57  18772  0.9

                    psCall = system.call(
                        "ps -p %s -o cputime,etime,rss,%%mem" %
                        self.processPid)

                    isSuccessful = False
                    if psCall and len(psCall) >= 2:
                        stats = psCall[1].strip().split()

                        if len(stats) == 4:
                            try:
                                totalCpuTime = str_tools.parse_short_time_label(
                                    stats[0])
                                uptime = str_tools.parse_short_time_label(
                                    stats[1])
                                cpuDelta = totalCpuTime - self._lastCpuTotal
                                newValues[
                                    "cpuSampling"] = cpuDelta / timeSinceReset
                                newValues["cpuAvg"] = totalCpuTime / uptime
                                newValues["_lastCpuTotal"] = totalCpuTime

                                newValues["memUsage"] = int(
                                    stats[2]) * 1024  # ps size is in kb
                                newValues["memUsagePercentage"] = float(
                                    stats[3]) / 100.0
                                isSuccessful = True
                            except ValueError, exc:
                                pass

                    if not isSuccessful:
                        raise IOError("unrecognized output from ps: %s" %
                                      psCall)
            except IOError, exc:
                newValues = {}
                self._failureCount += 1

                if self._useProc:
                    if self._failureCount >= 3:
                        # We've failed three times resolving via proc. Warn, and fall back
                        # to ps resolutions.
                        log.info(
                            "Failed three attempts to get process resource usage from proc, falling back to ps (%s)"
                            % exc)

                        self._useProc = False
                        self._failureCount = 1  # prevents lastQueryFailed() from thinking that we succeeded
                    else:
                        # wait a bit and try again
                        log.debug(
                            "Unable to query process resource usage from proc (%s)"
                            % exc)
                        self._cond.acquire()
                        if not self._halt: self._cond.wait(0.5)
                        self._cond.release()
                else:
                    # exponential backoff on making failed ps calls
                    sleepTime = 0.01 * (
                        2**self._failureCount) + self._failureCount
                    log.debug(
                        "Unable to query process resource usage from ps, waiting %0.2f seconds (%s)"
                        % (sleepTime, exc))
                    self._cond.acquire()
                    if not self._halt: self._cond.wait(sleepTime)
                    self._cond.release()

            # sets the new values
            if newValues:
                # If this is the first run then the cpuSampling stat is meaningless
                # (there isn't a previous tick to sample from, so it's zero at this
                # point), so we set it to the average, which is a fairer estimate.
                if self.lastLookup == -1:
                    newValues["cpuSampling"] = newValues["cpuAvg"]

                self._valLock.acquire()
                self.cpuSampling = newValues["cpuSampling"]
                self.cpuAvg = newValues["cpuAvg"]
                self.memUsage = newValues["memUsage"]
                self.memUsagePercentage = newValues["memUsagePercentage"]
                self._lastCpuTotal = newValues["_lastCpuTotal"]
                self.lastLookup = time.time()
                self._runCount += 1
                self._failureCount = 0
                self._valLock.release()
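
Both versions of this sampler back off exponentially after repeated ps failures, sleeping for 0.01 * (2 ** failureCount) + failureCount seconds. A small sketch of the schedule that formula produces:

# Backoff schedule produced by the formula used above for failed ps calls.
for failure_count in range(1, 6):
  sleep_time = 0.01 * (2 ** failure_count) + failure_count
  print("failure %d: wait %0.2f seconds" % (failure_count, sleep_time))

# failure 1: wait 1.02 seconds
# failure 2: wait 2.04 seconds
# failure 3: wait 3.08 seconds
# failure 4: wait 4.16 seconds
# failure 5: wait 5.32 seconds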
Example #24
0
def load_option_descriptions(load_path = None, check_version = True):
  """
  Fetches and parses descriptions for tor's configuration options from its man
  page. This can be a somewhat lengthy call, and raises an IOError if issues
  occur. When successfully loading from a file this returns the version of the
  loaded contents.

  If available, this can load the configuration descriptions from a file where
  they were previously persisted to cut down on the load time (latency for this
  is around 200ms).

  Arguments:
    load_path     - if set, this attempts to fetch the configuration
                   descriptions from the given path instead of the man page
    check_version - if True, discards the results when tor's version doesn't
                   match the cached descriptors; otherwise they're accepted anyway
  """

  with CONFIG_DESCRIPTIONS_LOCK:
    CONFIG_DESCRIPTIONS.clear()

    raised_exc = None
    loaded_version = ''

    try:
      if load_path:
        # Input file is expected to be of the form:
        # <option>
        # <arg description>
        # <description, possibly multiple lines>
        # <PERSIST_ENTRY_DIVIDER>
        input_file = open(load_path, 'r')
        input_file_contents = input_file.readlines()
        input_file.close()

        try:
          version_line = input_file_contents.pop(0).rstrip()

          if version_line.startswith('Tor Version '):
            file_version = version_line[12:]
            loaded_version = file_version
            tor_version = tor_controller().get_info('version', '')

            if check_version and file_version != tor_version:
              msg = "wrong version, tor is %s but the file's from %s" % (tor_version, file_version)
              raise IOError(msg)
          else:
            raise IOError('unable to parse version')

          while input_file_contents:
            # gets category enum, failing if it doesn't exist
            category = input_file_contents.pop(0).rstrip()

            if category not in Category:
              base_msg = "invalid category in input file: '%s'"
              raise IOError(base_msg % category)

            # gets the position in the man page
            index_arg, index_str = -1, input_file_contents.pop(0).rstrip()

            if index_str.startswith('index: '):
              index_str = index_str[7:]

              if index_str.isdigit():
                index_arg = int(index_str)
              else:
                raise IOError('non-numeric index value: %s' % index_str)
            else:
              raise IOError('malformed index argument: %s' % index_str)

            option = input_file_contents.pop(0).rstrip()
            argument = input_file_contents.pop(0).rstrip()

            description, loaded_line = '', input_file_contents.pop(0)

            while loaded_line != PERSIST_ENTRY_DIVIDER:
              description += loaded_line

              if input_file_contents:
                loaded_line = input_file_contents.pop(0)
              else:
                break

            CONFIG_DESCRIPTIONS[option.lower()] = ManPageEntry(option, index_arg, category, argument, description.rstrip())
        except IndexError:
          CONFIG_DESCRIPTIONS.clear()
          raise IOError('input file format is invalid')
      else:
        man_call_results = system.call('man tor', None)

        if not man_call_results:
          raise IOError('man page not found')

        # Fetches all options available with this tor instance. This isn't
        # vital, and the valid_options are left empty if the call fails.

        controller, valid_options = tor_controller(), []
        config_option_query = controller.get_info('config/names', None)

        if config_option_query:
          for line in config_option_query.strip().split('\n'):
            valid_options.append(line[:line.find(' ')].lower())

        option_count, last_option, last_arg = 0, None, None
        last_category, last_description = Category.GENERAL, ''

        for line in man_call_results:
          line = codecs.latin_1_encode(line, 'replace')[0]
          line = ui_tools.get_printable(line)
          stripped_line = line.strip()

          # we have content, but an indent less than an option (ignore line)
          # if stripped_line and not line.startswith(' ' * MAN_OPT_INDENT): continue

          # line starts with an indent equivalent to a new config option

          is_opt_indent = line.startswith(' ' * MAN_OPT_INDENT) and line[MAN_OPT_INDENT] != ' '

          is_category_line = not line.startswith(' ') and 'OPTIONS' in line

          # if this is a category header or a new option, add an entry using the
          # buffered results

          if is_opt_indent or is_category_line:
            # Filters the line based on whether the option is recognized by tor.
            # This isn't necessary for nyx, so if we're unable to make the check
            # we skip filtering (no loss, the map will just have some extra
            # noise).

            stripped_description = last_description.strip()

            if last_option and (not valid_options or last_option.lower() in valid_options):
              CONFIG_DESCRIPTIONS[last_option.lower()] = ManPageEntry(last_option, option_count, last_category, last_arg, stripped_description)
              option_count += 1

            last_description = ''

            # parses the option and argument

            line = line.strip()
            div_index = line.find(' ')

            if div_index != -1:
              last_option, last_arg = line[:div_index], line[div_index + 1:]

            # if this is a category header then switch it

            if is_category_line:
              if line.startswith('OPTIONS'):
                last_category = Category.GENERAL
              elif line.startswith('CLIENT'):
                last_category = Category.CLIENT
              elif line.startswith('SERVER'):
                last_category = Category.RELAY
              elif line.startswith('DIRECTORY SERVER'):
                last_category = Category.DIRECTORY
              elif line.startswith('DIRECTORY AUTHORITY SERVER'):
                last_category = Category.AUTHORITY
              elif line.startswith('HIDDEN SERVICE'):
                last_category = Category.HIDDEN_SERVICE
              elif line.startswith('TESTING NETWORK'):
                last_category = Category.TESTING
              else:
                log.notice('Unrecognized category in the man page: %s' % line.strip())
          else:
            # Appends the text to the running description. Empty lines and lines
            # starting with a specific indentation are used for formatting, for
            # instance the ExitPolicy and TestingTorNetwork entries.

            if last_description and last_description[-1] != '\n':
              last_description += ' '

            if not stripped_line:
              last_description += '\n\n'
            elif line.startswith(' ' * MAN_EX_INDENT):
              last_description += '    %s\n' % stripped_line
            else:
              last_description += stripped_line
    except IOError as exc:
      raised_exc = exc

  if raised_exc:
    raise raised_exc
  else:
    return loaded_version
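
When loading from a file, this function expects a 'Tor Version ...' header followed by repeated category / index / option / argument / description records separated by PERSIST_ENTRY_DIVIDER. A small sketch of writing one record in that layout is below; the divider value, version string, category name, and sample entry are illustrative assumptions, not taken from the source.

# Illustrative sketch of the persisted layout parsed above. The divider value,
# version string, category name, and sample entry are assumptions for
# demonstration only.
PERSIST_ENTRY_DIVIDER = "-" * 80 + "\n"

def persist_entry(output_file, category, index, option, argument, description):
  output_file.write("%s\n" % category)
  output_file.write("index: %i\n" % index)
  output_file.write("%s\n%s\n" % (option, argument))
  output_file.write("%s\n" % description)
  output_file.write(PERSIST_ENTRY_DIVIDER)

with open("config_descriptions.txt", "w") as output_file:
  output_file.write("Tor Version 0.4.8.9\n")
  persist_entry(
    output_file,
    "GENERAL",
    0,
    "ControlPort",
    "PORT|auto",
    "The port on which Tor will listen for controller connections.",
  )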