Code example #1
File: torConfig.py  Project: zhou-kz/arm
def getConfigLocation():
  """
  Provides the location of the torrc, raising an IOError with the reason if the
  path can't be determined.
  """
  
  conn = torTools.getConn()
  configLocation = conn.getInfo("config-file")
  if not configLocation: raise IOError("unable to query the torrc location")
  
  # checks if this is a relative path, needing the tor pwd to be appended
  if configLocation[0] != "/":
    torPid = conn.getMyPid()
    failureMsg = "querying tor's pwd failed because %s"
    if not torPid: raise IOError(failureMsg % "we couldn't get the pid")
    
    try:
      # pwdx results are of the form:
      # 3799: /home/atagar
      # 5839: No such process
      results = sysTools.call("pwdx %s" % torPid)
      if not results:
        raise IOError(failureMsg % "pwdx didn't return any results")
      elif results[0].endswith("No such process"):
        raise IOError(failureMsg % ("pwdx reported no process for pid " + torPid))
      elif len(results) != 1 or results[0].count(" ") != 1:
        raise IOError(failureMsg % "we got unexpected output from pwdx")
      else:
        pwdPath = results[0][results[0].find(" ") + 1:]
        configLocation = "%s/%s" % (pwdPath, configLocation)
    except IOError, exc:
      raise IOError(failureMsg % ("the pwdx call failed: " + str(exc)))
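
For comparison, a minimal standalone sketch of the same pwdx lookup using only the standard library (subprocess in place of arm's sysTools); the helper name is illustrative and error handling is simplified.

import subprocess

def get_cwd_via_pwdx(pid):
    # pwdx output is of the form "3799: /home/atagar"; a missing process
    # either exits nonzero or reports "No such process"
    try:
        output = subprocess.check_output(["pwdx", str(pid)]).decode().strip()
    except (OSError, subprocess.CalledProcessError) as exc:
        raise IOError("pwdx call failed: %s" % exc)
    if not output or output.endswith("No such process"):
        raise IOError("pwdx reported no process for pid %s" % pid)
    return output.split(" ", 1)[1]

# usage (our own pid as a stand-in):
# import os; print(get_cwd_via_pwdx(os.getpid()))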
Code example #2
File: psStats.py  Project: zhou-kz/arm
 def eventTick(self):
   """
   Processes a ps event.
   """
   
   psResults = {} # mapping of stat names to their results
   if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
     queryCmd = "ps -p %s -o %s" % (self.queryPid, ",".join(self.queryParam))
     psCall = sysTools.call(queryCmd, self.cacheTime, True)
     
     if psCall and len(psCall) == 2:
       # ps provided results (first line is headers, second is stats)
       stats = psCall[1].strip().split()
       
       if len(self.queryParam) == len(stats):
         # we have a result to match each stat - constructs mapping
         psResults = dict([(self.queryParam[i], stats[i]) for i in range(len(stats))])
         self.failedCount = 0 # had a successful call - reset failure count
     
     if not psResults:
       # ps call failed, if we fail too many times sequentially then abandon
       # listing (probably due to invalid ps parameters)
       self.failedCount += 1
       
       if self.failedCount == FAILURE_THRESHOLD:
         msg = "failed several attempts to query '%s', abandoning ps graph" % queryCmd
         log.log(self._config["log.graph.ps.abandon"], msg)
   
   # if something fails (no pid, ps call failed, etc) then uses last results
   primary, secondary = self.lastPrimary, self.lastSecondary
   
   for isPrimary in (True, False):
     if isPrimary: statName = self._config["features.graph.ps.primaryStat"]
     else: statName = self._config["features.graph.ps.secondaryStat"]
     
     if statName in psResults:
       try:
         result = float(psResults[statName])
         
         # The 'rss' and 'size' parameters provide memory usage in KB. This is
         # scaled up to MB so the graph's y-high is a reasonable value.
         if statName in ("rss", "size"): result /= 1024.0
         
         if isPrimary: primary = result
         else: secondary = result
       except ValueError:
         if self.queryParam != HEADER_PS_PARAM:
           # custom stat provides non-numeric results - give a warning and stop querying it
           msg = "unable to use non-numeric ps stat '%s' for graphing" % statName
           log.log(self._config["log.graph.ps.invalidStat"], msg)
           self.queryParam.remove(statName)
   
   self._processEvent(primary, secondary)
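
A minimal sketch of the ps query pattern used above, assuming a standard ps on the PATH and substituting subprocess for arm's cached sysTools.call; the parameter list is just an example here.

import subprocess

def query_ps_stats(pid, params=("%cpu", "rss", "%mem", "etime")):
    # ps prints a header line followed by one line of values, e.g.:
    # %CPU   RSS %MEM     ELAPSED
    #  0.3 14096  1.3       29:51
    lines = subprocess.check_output(["ps", "-p", str(pid), "-o", ",".join(params)]).decode().splitlines()
    if len(lines) != 2:
        return {}
    stats = lines[1].split()
    return dict(zip(params, stats)) if len(stats) == len(params) else {}

# usage (our own pid as a stand-in):
# import os; print(query_ps_stats(os.getpid()))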
Code example #3
File: connections.py  Project: katmagic/arm
def getConnections(resolutionCmd, processName, processPid = ""):
  """
  Retrieves a list of the current connections for a given process, providing a
  tuple list of the form:
  [(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]
  this raises an IOError if no connections are available or resolution fails
  (in most cases these appear identical). Common issues include:
    - insufficient permissions
    - resolution command is unavailable
    - usage of the command is non-standard (particularly an issue for BSD)
  
  Arguments:
    resolutionCmd - command to use in resolving the address
    processName   - name of the process for which connections are fetched
    processPid    - process ID (this helps improve accuracy)
  """
  
  
  # raises an IOError if the command fails or isn't available
  cmd = getResolverCommand(resolutionCmd, processName, processPid)
  results = sysTools.call(cmd)
  
  if not results: raise IOError("No results found using: %s" % cmd)
  
  # parses results for the resolution command
  conn = []
  for line in results:
    comp = line.split()
    
    if resolutionCmd == CMD_NETSTAT:
      localIp, localPort = comp[3].split(":")
      foreignIp, foreignPort = comp[4].split(":")
    elif resolutionCmd == CMD_SS:
      localIp, localPort = comp[4].split(":")
      foreignIp, foreignPort = comp[5].split(":")
    elif resolutionCmd == CMD_LSOF:
      local, foreign = comp[8].split("->")
      localIp, localPort = local.split(":")
      foreignIp, foreignPort = foreign.split(":")
    elif resolutionCmd == CMD_SOCKSTAT:
      localIp, localPort = comp[4].split(":")
      foreignIp, foreignPort = comp[5].split(":")
    elif resolutionCmd == CMD_BSD_SOCKSTAT:
      localIp, localPort = comp[5].split(":")
      foreignIp, foreignPort = comp[6].split(":")
    elif resolutionCmd == CMD_BSD_PROCSTAT:
      localIp, localPort = comp[9].split(":")
      foreignIp, foreignPort = comp[10].split(":")
    
    conn.append((localIp, localPort, foreignIp, foreignPort))
  
  return conn
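
The netstat branch above can be illustrated with a self-contained sketch; this assumes a Linux netstat producing the "-np" layout shown in the comment, and only the calling user's own connections carry a pid/program column without elevated privileges.

import subprocess

def netstat_connections(processName):
    output = subprocess.check_output("netstat -np 2>/dev/null", shell=True).decode()
    connections = []
    for line in output.splitlines():
        comp = line.split()
        # established tcp lines look like:
        # tcp  0  0  127.0.0.1:9051  127.0.0.1:38192  ESTABLISHED  2001/tor
        if len(comp) >= 7 and comp[0] == "tcp" and comp[6].endswith("/" + processName):
            localIp, localPort = comp[3].rsplit(":", 1)
            foreignIp, foreignPort = comp[4].rsplit(":", 1)
            connections.append((localIp, localPort, foreignIp, foreignPort))
    return connections

# usage: print(netstat_connections("tor"))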
Code example #4
File: connections.py  Project: zhou-kz/arm
def getConnections(resolutionCmd, processName, processPid=""):
    """
  Retrieves a list of the current connections for a given process, providing a
  tuple list of the form:
  [(local_ipAddr1, local_port1, foreign_ipAddr1, foreign_port1), ...]
  this raises an IOError if no connections are available or resolution fails
  (in most cases these appear identical). Common issues include:
    - insufficient permissions
    - resolution command is unavailable
    - usage of the command is non-standard (particularly an issue for BSD)
  
  Arguments:
    resolutionCmd - command to use in resolving the address
    processName   - name of the process for which connections are fetched
    processPid    - process ID (this helps improve accuracy)
  """

    # raises an IOError if the command fails or isn't available
    cmd = getResolverCommand(resolutionCmd, processName, processPid)
    results = sysTools.call(cmd)

    if not results: raise IOError("No results found using: %s" % cmd)

    # parses results for the resolution command
    conn = []
    for line in results:
        comp = line.split()

        if resolutionCmd == CMD_NETSTAT:
            localIp, localPort = comp[3].split(":")
            foreignIp, foreignPort = comp[4].split(":")
        elif resolutionCmd == CMD_SS:
            localIp, localPort = comp[4].split(":")
            foreignIp, foreignPort = comp[5].split(":")
        elif resolutionCmd == CMD_LSOF:
            local, foreign = comp[8].split("->")
            localIp, localPort = local.split(":")
            foreignIp, foreignPort = foreign.split(":")
        elif resolutionCmd == CMD_SOCKSTAT:
            localIp, localPort = comp[4].split(":")
            foreignIp, foreignPort = comp[5].split(":")
        elif resolutionCmd == CMD_BSD_SOCKSTAT:
            localIp, localPort = comp[5].split(":")
            foreignIp, foreignPort = comp[6].split(":")
        elif resolutionCmd == CMD_BSD_PROCSTAT:
            localIp, localPort = comp[9].split(":")
            foreignIp, foreignPort = comp[10].split(":")

        conn.append((localIp, localPort, foreignIp, foreignPort))

    return conn
Code example #5
def _resolveViaHost(ipAddr):
    """
  Performs a host lookup for the given IP, returning the resolved hostname.
  This raises an IOError if the lookup fails (os or network issue), and a
  ValueError in the case of DNS errors (address is unresolvable).
  
  Arguments:
    ipAddr - ip address to be resolved
  """

    hostname = sysTools.call("host %s" % ipAddr)[0].split()[-1:][0]

    if hostname == "reached":
        # got message: ";; connection timed out; no servers could be reached"
        raise IOError("lookup timed out")
    elif hostname in DNS_ERROR_CODES:
        # got error response (can't do resolution on address)
        raise ValueError("address is unresolvable: %s" % hostname)
    else:
        # strips off ending period and returns hostname
        return hostname[:-1]
Code example #6
File: hostnames.py  Project: JustMe23/arm
def _resolveViaHost(ipAddr):
  """
  Performs a host lookup for the given IP, returning the resolved hostname.
  This raises an IOError if the lookup fails (os or network issue), and a
  ValueError in the case of DNS errors (address is unresolvable).
  
  Arguments:
    ipAddr - ip address to be resolved
  """
  
  hostname = sysTools.call("host %s" % ipAddr)[0].split()[-1:][0]
  
  if hostname == "reached":
    # got message: ";; connection timed out; no servers could be reached"
    raise IOError("lookup timed out")
  elif hostname in DNS_ERROR_CODES:
    # got error response (can't do resolution on address)
    raise ValueError("address is unresolvable: %s" % hostname)
  else:
    # strips off ending period and returns hostname
    return hostname[:-1]
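
An alternative to shelling out to the host command is the standard library's reverse lookup; this isn't what arm does above, just a sketch of the same goal, raising socket.herror where the code above raises ValueError.

import socket

def resolve_via_socket(ipAddr):
    hostname, _aliases, _addrs = socket.gethostbyaddr(ipAddr)
    return hostname

# usage: print(resolve_via_socket("8.8.8.8"))  # e.g. "dns.google"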
Code example #7
File: wizard.py  Project: fhawah12345/fhawah12345.mra
def showWizard():
    """
  Provides a series of prompts, allowing the user to spawn a customized tor
  instance.
  """

    if not sysTools.isAvailable("tor"):
        msg = "Unable to run the setup wizard. Is tor installed?"
        log.log(log.WARN, msg)
        return

    # gets tor's version
    torVersion = None
    try:
        versionQuery = sysTools.call("tor --version")

        for line in versionQuery:
            if line.startswith("Tor version "):
                torVersion = torTools.parseVersion(line.split(" ")[2])
                break
    except IOError, exc:
        log.log(log.INFO, "'tor --version' query failed: %s" % exc)
Code example #8
File: torTools.py  Project: zhou-kz/arm
def getBsdJailId():
  """
  Get the FreeBSD jail id for the monitored Tor process.
  """
  
  # Output when called from a FreeBSD jail or when Tor isn't jailed:
  #   JID
  #    0
  # 
  # Otherwise it's something like:
  #   JID
  #    1
  
  torPid = getConn().getMyPid()
  psOutput = sysTools.call("ps -p %s -o jid" % torPid)
  
  if len(psOutput) == 2 and len(psOutput[1].split()) == 1:
    jid = psOutput[1].strip()
    if jid.isdigit(): return int(jid)
  
  log.log(CONFIG["log.unknownBsdJailId"], "Failed to figure out the FreeBSD jail id. Assuming 0.")
  return 0
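
The parse above can be exercised without FreeBSD by feeding it hard-coded sample output; the sample values below are illustrative.

def parse_jid(psOutput):
    # mirrors the checks in getBsdJailId: a header line plus a lone digit
    if len(psOutput) == 2 and len(psOutput[1].split()) == 1:
        jid = psOutput[1].strip()
        if jid.isdigit(): return int(jid)
    return 0

assert parse_jid(["JID", "  0"]) == 0
assert parse_jid(["JID", " 12"]) == 12
assert parse_jid(["ps: jid: keyword not found"]) == 0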
Code example #9
File: wizard.py  Project: JustMe23/arm
def showWizard():
  """
  Provides a series of prompts, allowing the user to spawn a customized tor
  instance.
  """
  
  if not sysTools.isAvailable("tor"):
    msg = "Unable to run the setup wizard. Is tor installed?"
    log.log(log.WARN, msg)
    return
  
  # gets tor's version
  torVersion = None
  try:
    versionQuery = sysTools.call("tor --version")
    
    for line in versionQuery:
      if line.startswith("Tor version "):
        torVersion = torTools.parseVersion(line.split(" ")[2])
        break
  except IOError, exc:
    log.log(log.INFO, "'tor --version' query failed: %s" % exc)
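
A standalone sketch of the version probe above, assuming a tor binary on the PATH; the tuple conversion stands in for torTools.parseVersion, which isn't shown in this excerpt.

import subprocess

def get_tor_version():
    for line in subprocess.check_output(["tor", "--version"]).decode().splitlines():
        if line.startswith("Tor version "):
            # e.g. "Tor version 0.2.2.13-alpha (git-...)" -> (0, 2, 2, 13)
            version = line.split(" ")[2].rstrip(".")
            return tuple(int(p) for p in version.split("-")[0].split("."))
    return None

# usage: print(get_tor_version())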
Code example #10
File: fileDescriptorPopup.py  Project: zhou-kz/arm
    def __init__(self, torPid):
        self.fdFile, self.fdConn, self.fdMisc = [], [], []
        self.fdLimit = 0
        self.errorMsg = ""
        self.scroll = 0

        try:
            ulimitCall = None

            # retrieves list of open files, options are:
            # n = no dns lookups, p = by pid, -F = show fields (L = login name, n = opened files)
            # TODO: better rewrite to take advantage of sysTools

            if not sysTools.isAvailable("lsof"):
                raise Exception("error: lsof is unavailable")
            results = sysTools.call("lsof -np %s -F Ln" % torPid)

            # if we didn't get any results then tor's probably closed (keep defaults)
            if len(results) == 0: return

            torUser = results[1][1:]
            results = results[2:]  # skip first couple lines (pid listing and user)

            # splits descriptors into buckets according to their type
            descriptors = [entry[1:].strip() for entry in results]  # strips off first character (always an 'n')

            # checks if read failed due to permission issues
            isPermissionDenied = True
            for desc in descriptors:
                if "Permission denied" not in desc:
                    isPermissionDenied = False
                    break

            if isPermissionDenied:
                raise Exception("lsof error: Permission denied")

            for desc in descriptors:
                if os.path.exists(desc): self.fdFile.append(desc)
                elif desc[0] != "/" and ":" in desc: self.fdConn.append(desc)
                else: self.fdMisc.append(desc)

            self.fdFile.sort()
            self.fdConn.sort()
            self.fdMisc.sort()

            # This is guessing the open file limit. Unfortunately there's no way
            # (other than "/usr/proc/bin/pfiles pid | grep rlimit" under Solaris) to
            # get the file descriptor limit for an arbitrary process. What we need is
            # for the tor process to provide the return value of the "getrlimit"
            # function via a GET_INFO call.
            if torUser.strip() == "debian-tor":
                # probably loaded via /etc/init.d/tor which changes descriptor limit
                self.fdLimit = 8192
            else:
                # uses ulimit to estimate (-H is for hard limit, which is what tor uses)
                ulimitCall = os.popen("ulimit -Hn 2> /dev/null")
                results = ulimitCall.readlines()
                if len(results) == 0:
                    raise Exception("error: ulimit is unavailable")
                self.fdLimit = int(results[0])

                # can't use sysTools for this call because ulimit isn't in the path...
                # so how the **** am I to detect if it's available!
                #if not sysTools.isAvailable("ulimit"): raise Exception("error: ulimit is unavailable")
                #results = sysTools.call("ulimit -Hn")
                #if len(results) == 0: raise Exception("error: ulimit call failed")
                #self.fdLimit = int(results[0])
        except Exception, exc:
            # problem arose in calling or parsing lsof or ulimit calls
            self.errorMsg = str(exc)
Code example #11
File: wizard.py  Project: fhawah12345/fhawah12345.mra
                        #   then use that
                        # - attempt sudo in case passwordless sudo is available
                        # - if all of the above fail then log instructions

                        if os.geteuid() == 0: runCommand = OVERRIDE_SCRIPT
                        elif os.path.exists(OVERRIDE_SETUID_SCRIPT):
                            runCommand = OVERRIDE_SETUID_SCRIPT
                        else:
                            # The -n argument to sudo is *supposed* to be available starting
                            # with 1.7.0 [1] however this is a dirty lie (Ubuntu 9.10 uses
                            # 1.7.0 and even has the option in its man page, but it doesn't
                            # work). Instead checking for version 1.7.1.
                            #
                            # [1] http://www.sudo.ws/pipermail/sudo-users/2009-January/003889.html

                            sudoVersionResult = sysTools.call("sudo -V")

                            # version output looks like "Sudo version 1.7.2p7"
                            if len(sudoVersionResult) == 1 and sudoVersionResult[0].count(" ") >= 2:
                                versionNum = 0

                                for comp in sudoVersionResult[0].split(" ")[2].split("."):
                                    if comp and comp[0].isdigit():
                                        versionNum = (10 * versionNum) + int(comp)
                                    else:
                                        # invalid format
                                        log.log(
Code example #12
File: torConfig.py  Project: zhou-kz/arm
def loadOptionDescriptions(loadPath = None):
  """
  Fetches and parses descriptions for tor's configuration options from its man
  page. This can be a somewhat lengthy call, and raises an IOError if issues
  occur.
  
  If available, this can load the configuration descriptions from a file where
  they were previously persisted to cut down on the load time (latency for this
  is around 200ms).
  
  Arguments:
    loadPath - if set, this attempts to fetch the configuration descriptions
               from the given path instead of the man page
  """
  
  CONFIG_DESCRIPTIONS_LOCK.acquire()
  CONFIG_DESCRIPTIONS.clear()
  
  raisedExc = None
  try:
    if loadPath:
      # Input file is expected to be of the form:
      # <option>
      # <arg description>
      # <description, possibly multiple lines>
      # <PERSIST_ENTRY_DIVIDER>
      inputFile = open(loadPath, "r")
      inputFileContents = inputFile.readlines()
      inputFile.close()
      
      # constructs a reverse mapping for categories
      strToCat = dict([(OPTION_CATEGORY_STR[cat], cat) for cat in OPTION_CATEGORY_STR])
      
      try:
        versionLine = inputFileContents.pop(0).rstrip()
        
        if versionLine.startswith("Tor Version "):
          fileVersion = versionLine[12:]
          torVersion = torTools.getConn().getInfo("version", "")
          if fileVersion != torVersion:
            msg = "wrong version, tor is %s but the file's from %s" % (torVersion, fileVersion)
            raise IOError(msg)
        else:
          raise IOError("unable to parse version")
        
        while inputFileContents:
          # gets category enum, failing if it doesn't exist
          categoryStr = inputFileContents.pop(0).rstrip()
          if categoryStr in strToCat:
            category = strToCat[categoryStr]
          else:
            baseMsg = "invalid category in input file: '%s'"
            raise IOError(baseMsg % categoryStr)
          
          # gets the position in the man page
          indexArg, indexStr = -1, inputFileContents.pop(0).rstrip()
          
          if indexStr.startswith("index: "):
            indexStr = indexStr[7:]
            
            if indexStr.isdigit(): indexArg = int(indexStr)
            else: raise IOError("non-numeric index value: %s" % indexStr)
          else: raise IOError("malformed index argument: %s"% indexStr)
          
          option = inputFileContents.pop(0).rstrip()
          argument = inputFileContents.pop(0).rstrip()
          
          description, loadedLine = "", inputFileContents.pop(0)
          while loadedLine != PERSIST_ENTRY_DIVIDER:
            description += loadedLine
            
            if inputFileContents: loadedLine = inputFileContents.pop(0)
            else: break
          
          CONFIG_DESCRIPTIONS[option.lower()] = ManPageEntry(indexArg, category, argument, description.rstrip())
      except IndexError:
        CONFIG_DESCRIPTIONS.clear()
        raise IOError("input file format is invalid")
    else:
      manCallResults = sysTools.call("man tor")
      
      # Fetches all options available with this tor instance. This isn't
      # vital, and the validOptions are left empty if the call fails.
      conn, validOptions = torTools.getConn(), []
      configOptionQuery = conn.getInfo("config/names").strip().split("\n")
      if configOptionQuery:
        validOptions = [line[:line.find(" ")].lower() for line in configOptionQuery]
      
      optionCount, lastOption, lastArg = 0, None, None
      lastCategory, lastDescription = GENERAL, ""
      for line in manCallResults:
        line = uiTools.getPrintable(line)
        strippedLine = line.strip()
        
        # we have content, but an indent less than an option (ignore line)
        #if strippedLine and not line.startswith(" " * MAN_OPT_INDENT): continue
        
        # line starts with an indent equivalent to a new config option
        isOptIndent = line.startswith(" " * MAN_OPT_INDENT) and line[MAN_OPT_INDENT] != " "
        
        isCategoryLine = not line.startswith(" ") and "OPTIONS" in line
        
        # if this is a category header or a new option, add an entry using the
        # buffered results
        if isOptIndent or isCategoryLine:
          # Filters the line based on if the option is recognized by tor or
          # not. This isn't necessary for arm, so if unable to make the check
          # then we skip filtering (no loss, the map will just have some extra
          # noise).
          strippedDescription = lastDescription.strip()
          if lastOption and (not validOptions or lastOption.lower() in validOptions):
            CONFIG_DESCRIPTIONS[lastOption.lower()] = ManPageEntry(optionCount, lastCategory, lastArg, strippedDescription)
            optionCount += 1
          lastDescription = ""
          
          # parses the option and argument
          line = line.strip()
          divIndex = line.find(" ")
          if divIndex != -1:
            lastOption, lastArg = line[:divIndex], line[divIndex + 1:]
          
          # if this is a category header then switch it
          if isCategoryLine:
            if line.startswith("OPTIONS"): lastCategory = GENERAL
            elif line.startswith("CLIENT"): lastCategory = CLIENT
            elif line.startswith("SERVER"): lastCategory = SERVER
            elif line.startswith("DIRECTORY SERVER"): lastCategory = DIRECTORY
            elif line.startswith("DIRECTORY AUTHORITY SERVER"): lastCategory = AUTHORITY
            elif line.startswith("HIDDEN SERVICE"): lastCategory = HIDDEN_SERVICE
            elif line.startswith("TESTING NETWORK"): lastCategory = TESTING
            else:
              msg = "Unrecognized category in the man page: %s" % line.strip()
              log.log(CONFIG["log.configDescriptions.unrecognizedCategory"], msg)
        else:
          # Appends the text to the running description. Empty lines and lines
          # starting with a specific indentation are used for formatting, for
          # instance the ExitPolicy and TestingTorNetwork entries.
          if lastDescription and lastDescription[-1] != "\n":
            lastDescription += " "
          
          if not strippedLine:
            lastDescription += "\n\n"
          elif line.startswith(" " * MAN_EX_INDENT):
            lastDescription += "    %s\n" % strippedLine
          else: lastDescription += strippedLine
  except IOError, exc:
    raisedExc = exc
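
A rough standalone sketch of the indentation-based man page scan above; the indent width is a guess at the usual man layout (arm's MAN_OPT_INDENT constant isn't shown in this excerpt), and `col -b` plays the role of uiTools.getPrintable by stripping man's backspace formatting. Unlike the filtered version above, it will also pick up some non-option entries.

import subprocess

def list_tor_options(optIndent=7):
    manOutput = subprocess.check_output("man tor | col -b", shell=True).decode("utf-8", "replace")
    options = []
    for line in manOutput.splitlines():
        # an option entry starts exactly at the assumed indent level
        isOptLine = (line.startswith(" " * optIndent)
                     and len(line) > optIndent
                     and line[optIndent] != " ")
        if isOptLine:
            option, _, argument = line.strip().partition(" ")
            options.append((option, argument))
    return options

# usage: print(list_tor_options()[:5])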
Code example #13
File: torTools.py  Project: zhou-kz/arm
 def _getRelayAttr(self, key, default, cacheUndefined = True):
   """
   Provides information associated with this relay, using the cached value if
   available and otherwise looking it up.
   
   Arguments:
     key            - parameter being queried (from CACHE_ARGS)
     default        - value to be returned if undefined
     cacheUndefined - caches when values are undefined, avoiding further
                      lookups if true
   """
   
   currentVal = self._cachedParam[key]
   if currentVal:
     if currentVal == UNKNOWN: return default
     else: return currentVal
   
   self.connLock.acquire()
   
   currentVal, result = self._cachedParam[key], None
   if not currentVal and self.isAlive():
     # still unset - fetch value
     if key in ("nsEntry", "descEntry"):
       myFingerprint = self.getInfo("fingerprint")
       
       if myFingerprint:
         queryType = "ns" if key == "nsEntry" else "desc"
         queryResult = self.getInfo("%s/id/%s" % (queryType, myFingerprint))
         if queryResult: result = queryResult.split("\n")
     elif key == "bwRate":
       # effective relayed bandwidth is the minimum of BandwidthRate,
       # MaxAdvertisedBandwidth, and RelayBandwidthRate (if set)
       effectiveRate = int(self.getOption("BandwidthRate"))
       
       relayRate = self.getOption("RelayBandwidthRate")
       if relayRate and relayRate != "0":
         effectiveRate = min(effectiveRate, int(relayRate))
       
       maxAdvertised = self.getOption("MaxAdvertisedBandwidth")
       if maxAdvertised: effectiveRate = min(effectiveRate, int(maxAdvertised))
       
       result = effectiveRate
     elif key == "bwBurst":
       # effective burst (same for BandwidthBurst and RelayBandwidthBurst)
       effectiveBurst = int(self.getOption("BandwidthBurst"))
       
       relayBurst = self.getOption("RelayBandwidthBurst")
       if relayBurst and relayBurst != "0":
         effectiveBurst = min(effectiveBurst, int(relayBurst))
       
       result = effectiveBurst
     elif key == "bwObserved":
       for line in self.getMyDescriptor([]):
         if line.startswith("bandwidth"):
           # line should look something like:
           # bandwidth 40960 102400 47284
           comp = line.split()
           
           if len(comp) == 4 and comp[-1].isdigit():
             result = int(comp[-1])
             break
     elif key == "bwMeasured":
       # TODO: Currently there's no client side indication of what type of
       # measurement was used. Include this in results if it's ever available.
       
       for line in self.getMyNetworkStatus([]):
         if line.startswith("w Bandwidth="):
           bwValue = line[12:]
           if bwValue.isdigit(): result = int(bwValue)
           break
     elif key == "flags":
       for line in self.getMyNetworkStatus([]):
         if line.startswith("s "):
           result = line[2:].split()
           break
     elif key == "pid":
       result = getPid(int(self.getOption("ControlPort", 9051)), self.getOption("PidFile"))
     elif key == "pathPrefix":
       # make sure the path prefix is valid and exists (providing a notice if not)
       prefixPath = CONFIG["features.pathPrefix"].strip()
       
       # adjusts the prefix path to account for jails under FreeBSD (many
       # thanks to Fabian Keil!)
       if not prefixPath and os.uname()[0] == "FreeBSD":
         jid = getBsdJailId()
         if jid != 0:
           # Output should be something like:
           #    JID  IP Address      Hostname      Path
           #      1  10.0.0.2        tor-jail      /usr/jails/tor-jail
           jlsOutput = sysTools.call("jls -j %s" % jid)
           
           if len(jlsOutput) == 2 and len(jlsOutput[1].split()) == 4:
             prefixPath = jlsOutput[1].split()[3]
             
             if self._pathPrefixLogging:
               msg = "Adjusting paths to account for Tor running in a jail at: %s" % prefixPath
               log.log(CONFIG["log.bsdJailFound"], msg)
       
       if prefixPath:
         # strips off ending slash from the path
         if prefixPath.endswith("/"): prefixPath = prefixPath[:-1]
         
         # avoid using paths that don't exist
         if self._pathPrefixLogging and prefixPath and not os.path.exists(prefixPath):
           msg = "The prefix path set in your config (%s) doesn't exist." % prefixPath
           log.log(CONFIG["log.torPrefixPathInvalid"], msg)
           prefixPath = ""
       
       self._pathPrefixLogging = False # prevents logging if fetched again
       result = prefixPath
     
     # cache value
     if result: self._cachedParam[key] = result
     elif cacheUndefined: self._cachedParam[key] = UNKNOWN
   elif currentVal == UNKNOWN: result = currentVal
   
   self.connLock.release()
   
   if result: return result
   else: return default
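
The "bwRate" branch above reduces to a small pure function: the effective relayed rate is the minimum of BandwidthRate, RelayBandwidthRate (when nonzero), and MaxAdvertisedBandwidth (when set). A sketch with illustrative byte/sec values:

def effectiveBwRate(bandwidthRate, relayBandwidthRate=None, maxAdvertised=None):
    effective = int(bandwidthRate)
    if relayBandwidthRate and int(relayBandwidthRate) != 0:
        effective = min(effective, int(relayBandwidthRate))
    if maxAdvertised:
        effective = min(effective, int(maxAdvertised))
    return effective

assert effectiveBwRate("5242880", "1048576", None) == 1048576
assert effectiveBwRate("5242880", "0", "2097152") == 2097152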
Code example #14
File: bandwidthStats.py  Project: refnode/arm
 def prepopulateFromState(self):
   """
   Attempts to use tor's state file to prepopulate values for the 15 minute
   interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
   returns True if successful and False otherwise.
   """
   
   # checks that this is a relay (if ORPort is unset, then skip)
   conn = torTools.getConn()
   orPort = conn.getOption("ORPort", None)
   if orPort == "0": return
   
   # gets the uptime (using the same parameters as the header panel to take
    # advantage of caching)
   uptime = None
   queryPid = conn.getMyPid()
   if queryPid:
     queryParam = ["%cpu", "rss", "%mem", "etime"]
     queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
     psCall = sysTools.call(queryCmd, 3600, True)
     
     if psCall and len(psCall) == 2:
       stats = psCall[1].strip().split()
       if len(stats) == 4: uptime = stats[3]
   
   # checks if tor has been running for at least a day, the reason being that
   # the state tracks a day's worth of data and this should only prepopulate
   # results associated with this tor instance
   if not uptime or not "-" in uptime:
     msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
     log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
     return False
   
   # get the user's data directory (usually '~/.tor')
   dataDir = conn.getOption("DataDirectory", None)
   if not dataDir:
     msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
     log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
     return False
   
   # attempt to open the state file
   try: stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir), "r")
   except IOError:
     msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
     log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
     return False
   
   # get the BWHistory entries (ordered oldest to newest) and number of
   # intervals since last recorded
   bwReadEntries, bwWriteEntries = None, None
    missingReadEntries, missingWriteEntries = None, None
    lastReadTime, lastWriteTime = None, None
   
   # converts from gmt to local with respect to DST
   tz_offset = time.altzone if time.localtime()[8] else time.timezone
   
   for line in stateFile:
     line = line.strip()
     
     # According to the rep_hist_update_state() function the BWHistory*Ends
     # correspond to the start of the following sampling period. Also, the
     # most recent values of BWHistory*Values appear to be an incremental
     # counter for the current sampling period. Hence, offsets are added to
     # account for both.
     
     if line.startswith("BWHistoryReadValues"):
       bwReadEntries = line[20:].split(",")
       bwReadEntries = [int(entry) / 1024.0 / 900 for entry in bwReadEntries]
       bwReadEntries.pop()
     elif line.startswith("BWHistoryWriteValues"):
       bwWriteEntries = line[21:].split(",")
       bwWriteEntries = [int(entry) / 1024.0 / 900 for entry in bwWriteEntries]
       bwWriteEntries.pop()
     elif line.startswith("BWHistoryReadEnds"):
       lastReadTime = time.mktime(time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastReadTime -= 900
       missingReadEntries = int((time.time() - lastReadTime) / 900)
     elif line.startswith("BWHistoryWriteEnds"):
       lastWriteTime = time.mktime(time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastWriteTime -= 900
       missingWriteEntries = int((time.time() - lastWriteTime) / 900)
   
   if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
     msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
     log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
     return False
   
   # fills missing entries with the last value
   bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
   bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries
   
   # crops starting entries so they're the same size
   entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
   bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
   bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]
   
   # gets index for 15-minute interval
   intervalIndex = 0
   for indexEntry in graphPanel.UPDATE_INTERVALS:
     if indexEntry[1] == 900: break
     else: intervalIndex += 1
   
   # fills the graphing parameters with state information
   for i in range(entryCount):
     readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]
     
     self.lastPrimary, self.lastSecondary = readVal, writeVal
     
     self.prepopulatePrimaryTotal += readVal * 900
     self.prepopulateSecondaryTotal += writeVal * 900
     self.prepopulateTicks += 900
     
     self.primaryCounts[intervalIndex].insert(0, readVal)
     self.secondaryCounts[intervalIndex].insert(0, writeVal)
   
    self.maxPrimary[intervalIndex] = max(self.primaryCounts[intervalIndex])
    self.maxSecondary[intervalIndex] = max(self.secondaryCounts[intervalIndex])
   del self.primaryCounts[intervalIndex][self.maxCol + 1:]
   del self.secondaryCounts[intervalIndex][self.maxCol + 1:]
   
   msg = PREPOPULATE_SUCCESS_MSG
   missingSec = time.time() - min(lastReadTime, lastWriteTime)
   if missingSec: msg += " (%s is missing)" % uiTools.getTimeLabel(missingSec, 0, True)
   log.log(self._config["log.graph.bw.prepopulateSuccess"], msg)
   
   return True
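
A minimal sketch of the state-file parsing above, read side only, with a made-up sample; the 900 second interval and KB/s scaling mirror the code, while the GMT-to-local offset handling is omitted for brevity.

import time

def parseBwReadHistory(stateLines, interval=900):
    entries, lastEnd = None, None
    for line in stateLines:
        line = line.strip()
        if line.startswith("BWHistoryReadValues "):
            raw = line.split(" ", 1)[1].split(",")
            entries = [int(v) / 1024.0 / interval for v in raw]  # KB/s per interval
            entries.pop()  # drop the still-accumulating current interval
        elif line.startswith("BWHistoryReadEnds "):
            stamp = line.split(" ", 1)[1]  # marks the start of the next period
            lastEnd = time.mktime(time.strptime(stamp, "%Y-%m-%d %H:%M:%S"))
    return entries, lastEnd

sample = ["BWHistoryReadEnds 2011-04-03 20:45:00",
          "BWHistoryReadValues 921600,1843200,2764800"]
print(parseBwReadHistory(sample))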
Code example #15
File: logPanel.py  Project: refnode/arm
def getLogFileEntries(runlevels, readLimit = None, addLimit = None, config = None):
  """
  Parses tor's log file for past events matching the given runlevels, providing
  a list of log entries (ordered newest to oldest). Limiting the number of read
  entries is suggested to avoid parsing everything from logs in the GB and TB
  range.
  
  Arguments:
    runlevels - event types (DEBUG - ERR) to be returned
    readLimit - max lines of the log file that'll be read (unlimited if None)
    addLimit  - maximum entries to provide back (unlimited if None)
    config    - configuration parameters related to this panel, uses defaults
                if left as None
  """
  
  startTime = time.time()
  if not runlevels: return []
  
  if not config: config = DEFAULT_CONFIG
  
  # checks tor's configuration for the log file's location (if any exists)
  loggingTypes, loggingLocation = None, None
  for loggingEntry in torTools.getConn().getOption("Log", [], True):
    # looks for an entry like: notice file /var/log/tor/notices.log
    entryComp = loggingEntry.split()
    
    if entryComp[1] == "file":
      loggingTypes, loggingLocation = entryComp[0], entryComp[2]
      break
  
  if not loggingLocation: return []
  
  # includes the prefix for tor paths
  loggingLocation = torTools.getConn().getPathPrefix() + loggingLocation
  
  # if the runlevels argument is a superset of the log file then we can
  # limit the read contents to the addLimit
  allRunlevels = log.Runlevel.values()
  loggingTypes = loggingTypes.upper()
  if addLimit and (not readLimit or readLimit > addLimit):
    if "-" in loggingTypes:
      divIndex = loggingTypes.find("-")
      sIndex = allRunlevels.index(loggingTypes[:divIndex])
      eIndex = allRunlevels.index(loggingTypes[divIndex+1:])
      logFileRunlevels = allRunlevels[sIndex:eIndex+1]
    else:
      sIndex = allRunlevels.index(loggingTypes)
      logFileRunlevels = allRunlevels[sIndex:]
    
    # checks if runlevels we're reporting are a superset of the file's contents
    isFileSubset = True
    for runlevelType in logFileRunlevels:
      if runlevelType not in runlevels:
        isFileSubset = False
        break
    
    if isFileSubset: readLimit = addLimit
  
  # tries opening the log file, cropping results to avoid choking on huge logs
  lines = []
  try:
    if readLimit:
      lines = sysTools.call("tail -n %i %s" % (readLimit, loggingLocation))
      if not lines: raise IOError()
    else:
      logFile = open(loggingLocation, "r")
      lines = logFile.readlines()
      logFile.close()
  except IOError:
    msg = "Unable to read tor's log file: %s" % loggingLocation
    log.log(config["log.logPanel.prepopulateFailed"], msg)
  
  if not lines: return []
  
  loggedEvents = []
  currentUnixTime, currentLocalTime = time.time(), time.localtime()
  for i in range(len(lines) - 1, -1, -1):
    line = lines[i]
    
    # entries look like:
    # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
    lineComp = line.split()
    
    # Checks that we have all the components we expect. This could happen if
    # we're either not parsing a tor log or in weird edge cases (like being
    # out of disk space)
    
    if len(lineComp) < 4: continue
    
    eventType = lineComp[3][1:-1].upper()
    
    if eventType in runlevels:
      # converts timestamp to unix time
      timestamp = " ".join(lineComp[:3])
      
      # strips the decimal seconds
      if "." in timestamp: timestamp = timestamp[:timestamp.find(".")]
      
      # Ignoring wday and yday since they aren't used.
      #
      # Pretend the year is 2012, because 2012 is a leap year, and parsing a
      # date with strptime fails if Feb 29th is passed without a year that's
      # actually a leap year. We can't just use the current year, because we
      # might be parsing old logs which didn't get rotated.
      #
      # https://trac.torproject.org/projects/tor/ticket/5265
      
      timestamp = "2012 " + timestamp
      eventTimeComp = list(time.strptime(timestamp, "%Y %b %d %H:%M:%S"))
      eventTimeComp[8] = currentLocalTime.tm_isdst
      eventTime = time.mktime(eventTimeComp) # converts local to unix time
      
      # The above is gonna be wrong if the logs are for the previous year. If
      # the event's in the future then correct for this.
      if eventTime > currentUnixTime + 60:
        eventTimeComp[0] -= 1
        eventTime = time.mktime(eventTimeComp)
      
      eventMsg = " ".join(lineComp[4:])
      loggedEvents.append(LogEntry(eventTime, eventType, eventMsg, RUNLEVEL_EVENT_COLOR[eventType]))
    
    if "opening log file" in line:
      break # this entry marks the start of this tor instance
  
  if addLimit: loggedEvents = loggedEvents[:addLimit]
  msg = "Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (len(loggedEvents), loggingLocation, readLimit, time.time() - startTime)
  log.log(config["log.logPanel.prepopulateSuccess"], msg)
  return loggedEvents
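
The year trick above deserves a standalone illustration: tor log timestamps omit the year, and strptime rejects "Feb 29" unless the assumed year is a leap year, hence the hard-coded 2012. This sketch leaves DST detection to mktime rather than copying the current locale's flag.

import time

def parseTorLogTime(line):
    # e.g. "Jul 15 18:29:48.806 [notice] Parsing GEOIP file."
    stamp = " ".join(line.split()[:3]).split(".")[0]
    timeComp = list(time.strptime("2012 " + stamp, "%Y %b %d %H:%M:%S"))
    timeComp[8] = -1  # let mktime figure out DST
    return time.mktime(tuple(timeComp))

print(parseTorLogTime("Feb 29 01:02:03.000 [notice] test"))  # parses thanks to 2012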
Code example #16
File: logPanel.py  Project: zhou-kz/arm
def getLogFileEntries(runlevels, readLimit=None, addLimit=None, config=None):
    """
  Parses tor's log file for past events matching the given runlevels, providing
  a list of log entries (ordered newest to oldest). Limiting the number of read
  entries is suggested to avoid parsing everything from logs in the GB and TB
  range.
  
  Arguments:
    runlevels - event types (DEBUG - ERR) to be returned
    readLimit - max lines of the log file that'll be read (unlimited if None)
    addLimit  - maximum entries to provide back (unlimited if None)
    config    - configuration parameters related to this panel, uses defaults
                if left as None
  """

    startTime = time.time()
    if not runlevels: return []

    if not config: config = DEFAULT_CONFIG

    # checks tor's configuration for the log file's location (if any exists)
    loggingTypes, loggingLocation = None, None
    for loggingEntry in torTools.getConn().getOption("Log", [], True):
        # looks for an entry like: notice file /var/log/tor/notices.log
        entryComp = loggingEntry.split()

        if entryComp[1] == "file":
            loggingTypes, loggingLocation = entryComp[0], entryComp[2]
            break

    if not loggingLocation: return []

    # includes the prefix for tor paths
    loggingLocation = torTools.getConn().getPathPrefix() + loggingLocation

    # if the runlevels argument is a superset of the log file then we can
    # limit the read contents to the addLimit
    loggingTypes = loggingTypes.upper()
    if addLimit and (not readLimit or readLimit > addLimit):
        if "-" in loggingTypes:
            divIndex = loggingTypes.find("-")
            sIndex = RUNLEVELS.index(loggingTypes[:divIndex])
            eIndex = RUNLEVELS.index(loggingTypes[divIndex + 1:])
            logFileRunlevels = RUNLEVELS[sIndex:eIndex + 1]
        else:
            sIndex = RUNLEVELS.index(loggingTypes)
            logFileRunlevels = RUNLEVELS[sIndex:]

        # checks if runlevels we're reporting are a superset of the file's contents
        isFileSubset = True
        for runlevelType in logFileRunlevels:
            if runlevelType not in runlevels:
                isFileSubset = False
                break

        if isFileSubset: readLimit = addLimit

    # tries opening the log file, cropping results to avoid choking on huge logs
    lines = []
    try:
        if readLimit:
            lines = sysTools.call("tail -n %i %s" %
                                  (readLimit, loggingLocation))
            if not lines: raise IOError()
        else:
            logFile = open(loggingLocation, "r")
            lines = logFile.readlines()
            logFile.close()
    except IOError:
        msg = "Unable to read tor's log file: %s" % loggingLocation
        log.log(config["log.logPanel.prepopulateFailed"], msg)

    if not lines: return []

    loggedEvents = []
    currentUnixTime, currentLocalTime = time.time(), time.localtime()
    for i in range(len(lines) - 1, -1, -1):
        line = lines[i]

        # entries look like:
        # Jul 15 18:29:48.806 [notice] Parsing GEOIP file.
        lineComp = line.split()
        eventType = lineComp[3][1:-1].upper()

        if eventType in runlevels:
            # converts timestamp to unix time
            timestamp = " ".join(lineComp[:3])

            # strips the decimal seconds
            if "." in timestamp: timestamp = timestamp[:timestamp.find(".")]

            # overwrites missing time parameters with the local time (ignoring wday
            # and yday since they aren't used)
            eventTimeComp = list(time.strptime(timestamp, "%b %d %H:%M:%S"))
            eventTimeComp[0] = currentLocalTime.tm_year
            eventTimeComp[8] = currentLocalTime.tm_isdst
            eventTime = time.mktime(
                eventTimeComp)  # converts local to unix time

            # The above is gonna be wrong if the logs are for the previous year. If
            # the event's in the future then correct for this.
            if eventTime > currentUnixTime + 60:
                eventTimeComp[0] -= 1
                eventTime = time.mktime(eventTimeComp)

            eventMsg = " ".join(lineComp[4:])
            loggedEvents.append(
                LogEntry(eventTime, eventType, eventMsg,
                         RUNLEVEL_EVENT_COLOR[eventType]))

        if "opening log file" in line:
            break  # this entry marks the start of this tor instance

    if addLimit: loggedEvents = loggedEvents[:addLimit]
    msg = "Read %i entries from tor's log file: %s (read limit: %i, runtime: %0.3f)" % (
        len(loggedEvents), loggingLocation, readLimit, time.time() - startTime)
    log.log(config["log.logPanel.prepopulateSuccess"], msg)
    return loggedEvents
Code example #17
File: wizard.py  Project: JustMe23/arm
 # - if the setuid binary is available at '/usr/bin/torrc-override'
 #   then use that
 # - attempt sudo in case passwordless sudo is available
 # - if all of the above fail then log instructions
 
 if os.geteuid() == 0: runCommand = OVERRIDE_SCRIPT
 elif os.path.exists(OVERRIDE_SETUID_SCRIPT): runCommand = OVERRIDE_SETUID_SCRIPT
 else:
   # The -n argument to sudo is *supposed* to be available starting
   # with 1.7.0 [1] however this is a dirty lie (Ubuntu 9.10 uses
   # 1.7.0 and even has the option in its man page, but it doesn't
   # work). Instead checking for version 1.7.1.
   #
   # [1] http://www.sudo.ws/pipermail/sudo-users/2009-January/003889.html
   
   sudoVersionResult = sysTools.call("sudo -V")
   
   # version output looks like "Sudo version 1.7.2p7"
   if len(sudoVersionResult) == 1 and sudoVersionResult[0].count(" ") >= 2:
     versionNum = 0
     
     for comp in sudoVersionResult[0].split(" ")[2].split("."):
       if comp and comp[0].isdigit():
         versionNum = (10 * versionNum) + int(comp)
       else:
         # invalid format
         log.log(log.INFO, "Unrecognized sudo version string: %s" % sudoVersionResult[0])
         versionNum = 0
         break
     
     if versionNum >= 171:
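
The collapsing of "Sudo version 1.7.2p7" into 172 can be shown in isolation; unlike the excerpt, this sketch trims trailing letters (the "p7" patch suffix) from each component before converting, and the inputs are hard-coded rather than read from `sudo -V`.

def sudoVersionNum(versionLine):
    num = 0
    for comp in versionLine.split(" ")[2].split("."):
        digits = ""
        for ch in comp:
            if ch.isdigit(): digits += ch
            else: break
        if not digits: return 0  # unrecognized format
        num = (10 * num) + int(digits)
    return num

assert sudoVersionNum("Sudo version 1.7.2p7") == 172
assert sudoVersionNum("Sudo version 1.7.0") == 170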
Code example #18
File: fileDescriptorPopup.py  Project: katmagic/arm
 def __init__(self, torPid):
   self.fdFile, self.fdConn, self.fdMisc = [], [], []
   self.fdLimit = 0
   self.errorMsg = ""
   self.scroll = 0
   
   try:
     ulimitCall = None
     
     # retrieves list of open files, options are:
     # n = no dns lookups, p = by pid, -F = show fields (L = login name, n = opened files)
     # TODO: better rewrite to take advantage of sysTools
     
     if not sysTools.isAvailable("lsof"): raise Exception("error: lsof is unavailable")
     results = sysTools.call("lsof -np %s -F Ln" % torPid)
     
     # if we didn't get any results then tor's probably closed (keep defaults)
     if len(results) == 0: return
     
     torUser = results[1][1:]
     results = results[2:] # skip first couple lines (pid listing and user)
     
     # splits descriptors into buckets according to their type
     descriptors = [entry[1:].strip() for entry in results] # strips off first character (always an 'n')
     
     # checks if read failed due to permission issues
     isPermissionDenied = True
     for desc in descriptors:
       if "Permission denied" not in desc:
         isPermissionDenied = False
         break
     
     if isPermissionDenied:
       raise Exception("lsof error: Permission denied")
     
     for desc in descriptors:
       if os.path.exists(desc): self.fdFile.append(desc)
       elif desc[0] != "/" and ":" in desc: self.fdConn.append(desc)
       else: self.fdMisc.append(desc)
     
     self.fdFile.sort()
     self.fdConn.sort()
     self.fdMisc.sort()
     
     # This is guessing the open file limit. Unfortunately there's no way
     # (other than "/usr/proc/bin/pfiles pid | grep rlimit" under Solaris) to
     # get the file descriptor limit for an arbitrary process. What we need is
     # for the tor process to provide the return value of the "getrlimit"
     # function via a GET_INFO call.
     if torUser.strip() == "debian-tor":
       # probably loaded via /etc/init.d/tor which changes descriptor limit
       self.fdLimit = 8192
     else:
       # uses ulimit to estimate (-H is for hard limit, which is what tor uses)
       ulimitCall = os.popen("ulimit -Hn 2> /dev/null")
       results = ulimitCall.readlines()
       if len(results) == 0: raise Exception("error: ulimit is unavailable")
       self.fdLimit = int(results[0])
       
       # can't use sysTools for this call because ulimit isn't in the path...
       # so how the **** am I to detect if it's available!
       #if not sysTools.isAvailable("ulimit"): raise Exception("error: ulimit is unavailable")
       #results = sysTools.call("ulimit -Hn")
       #if len(results) == 0: raise Exception("error: ulimit call failed")
       #self.fdLimit = int(results[0])
   except Exception, exc:
     # problem arose in calling or parsing lsof or ulimit calls
     self.errorMsg = str(exc)
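
The comment above notes there's no portable way to read another process's descriptor limit. For the current process, Python's resource module exposes getrlimit directly, which is what the ulimit -Hn fallback approximates:

import resource

# RLIMIT_NOFILE gives (soft, hard) file descriptor limits for this process
softLimit, hardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
print("hard file descriptor limit: %s" % hardLimit)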
Code example #19
File: headerPanel.py  Project: katmagic/arm
 def _update(self, setStatic=False):
   """
   Updates stats in the vals mapping. By default this just revises volatile
   attributes.
   
   Arguments:
     setStatic - resets all parameters, including relatively static values
   """
   
   self.valsLock.acquire()
   conn = torTools.getConn()
   
   if setStatic:
     # version is truncated to first part, for instance:
     # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
     self.vals["tor/version"] = conn.getInfo("version", "Unknown").split()[0]
     self.vals["tor/versionStatus"] = conn.getInfo("status/version/current", "Unknown")
     self.vals["tor/nickname"] = conn.getOption("Nickname", "")
     self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
     self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
     self.vals["tor/controlPort"] = conn.getOption("ControlPort", "")
     self.vals["tor/isAuthPassword"] = conn.getOption("HashedControlPassword") != None
     self.vals["tor/isAuthCookie"] = conn.getOption("CookieAuthentication") == "1"
     
     # orport is reported as zero if unset
     if self.vals["tor/orPort"] == "0": self.vals["tor/orPort"] = ""
     
     # overwrite address if ORListenAddress is set (and possibly orPort too)
     self.vals["tor/address"] = "Unknown"
     listenAddr = conn.getOption("ORListenAddress")
     if listenAddr:
       if ":" in listenAddr:
         # both ip and port overwritten
         self.vals["tor/address"] = listenAddr[:listenAddr.find(":")]
         self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") + 1:]
       else:
         self.vals["tor/address"] = listenAddr
     
     # fetch exit policy (might span over multiple lines)
     policyEntries = []
     for exitPolicy in conn.getOption("ExitPolicy", [], True):
       policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
     self.vals["tor/exitPolicy"] = ", ".join(policyEntries)
     
     # system information
     unameVals = os.uname()
     self.vals["sys/hostname"] = unameVals[1]
     self.vals["sys/os"] = unameVals[0]
     self.vals["sys/version"] = unameVals[2]
     
     pid = conn.getMyPid()
     self.vals["stat/pid"] = pid if pid else ""
     
     # reverts volatile parameters to defaults
     self.vals["tor/fingerprint"] = "Unknown"
     self.vals["tor/flags"] = []
     self.vals["stat/%torCpu"] = "0"
     self.vals["stat/%armCpu"] = "0"
     self.vals["stat/rss"] = "0"
     self.vals["stat/%mem"] = "0"
     self.vals["stat/etime"] = ""
   
   # sets volatile parameters
   volatile = {}
   
   # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
   # events. Introduce caching via torTools?
   if self.vals["tor/address"] == "Unknown":
     volatile["tor/address"] = conn.getInfo("address", self.vals["tor/address"])
   
   volatile["tor/fingerprint"] = conn.getInfo("fingerprint", self.vals["tor/fingerprint"])
   volatile["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])
   
   # ps derived stats
   psParams = ["%cpu", "rss", "%mem", "etime"]
   if self.vals["stat/pid"]:
     # if call fails then everything except etime are zeroed out (most likely
     # tor's no longer running)
     volatile["stat/%torCpu"] = "0"
     volatile["stat/rss"] = "0"
     volatile["stat/%mem"] = "0"
     
     # the ps call formats results as:
     # %CPU   RSS %MEM     ELAPSED
     # 0.3 14096  1.3       29:51
     psRate = self._config["queries.ps.rate"]
     psCall = sysTools.call("ps -p %s -o %s" % (self.vals["stat/pid"], ",".join(psParams)), psRate, True)
     
     if psCall and len(psCall) >= 2:
       stats = psCall[1].strip().split()
       
       if len(stats) == len(psParams):
         for i in range(len(psParams)):
           paramName = psParams[i]
           if paramName == "%cpu": paramName = "%torCpu"
           volatile["stat/" + paramName] = stats[i]
   
   # determines the cpu time for the arm process (including user and system
   # time of both the primary and child processes)
   
   currentTime, totalCpuTime = time.time(), sum(os.times()[:3])
   cpuDelta = totalCpuTime - self._armCpuSampling[0]
   timeDelta = currentTime - self._armCpuSampling[1]
   self.vals["stat/%armCpu"] = "%0.1f" % (100 * cpuDelta / timeDelta)
   self._armCpuSampling = (totalCpuTime, currentTime)
   
   # checks if any changes have been made and merges volatile into vals
   
   self._isChanged |= setStatic
   for key, val in volatile.items():
     self._isChanged |= self.vals[key] != val
     self.vals[key] = val
   
   self._lastUpdate = currentTime
   self.valsLock.release()
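
The arm CPU figure above comes from sampling os.times(), which reports user and system CPU time for the process and its children; a standalone sketch of the same delta calculation, with a sleep standing in for the interval between panel updates:

import os, time

def sampleCpuPercent(interval=1.0):
    startCpu, startWall = sum(os.times()[:3]), time.time()
    time.sleep(interval)  # stand-in for the work done between updates
    cpuDelta = sum(os.times()[:3]) - startCpu
    wallDelta = time.time() - startWall
    return 100 * cpuDelta / wallDelta

print("%0.1f%%" % sampleCpuPercent())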
Code example #20
File: torTools.py  Project: zhou-kz/arm
def getPid(controlPort=9051, pidFilePath=None):
  """
  Attempts to determine the process id for a running tor process, using the
  following:
  1. GETCONF PidFile
  2. "pgrep -x tor"
  3. "pidof tor"
  4. "netstat -npl | grep 127.0.0.1:%s" % <tor control port>
  5. "ps -o pid -C tor"
  6. "sockstat -4l -P tcp -p %i | grep tor" % <tor control port>
  
  If pidof or ps provide multiple tor instances then their results are
  discarded (since only netstat can differentiate using the control port). This
  provides None if either no running process exists or it can't be determined.
  
  Arguments:
    controlPort - control port of the tor process if multiple exist
    pidFilePath - path to the pid file generated by tor
  """
  
  # attempts to fetch via the PidFile, failing if:
  # - the option is unset
  # - unable to read the file (such as insufficient permissions)
  
  if pidFilePath:
    try:
      pidFile = open(pidFilePath, "r")
      pidEntry = pidFile.readline().strip()
      pidFile.close()
      
      if pidEntry.isdigit(): return pidEntry
    except: pass
  
  # attempts to resolve using pgrep, failing if:
  # - tor is running under a different name
  # - there are multiple instances of tor
  try:
    results = sysTools.call("pgrep -x tor")
    if len(results) == 1 and len(results[0].split()) == 1:
      pid = results[0].strip()
      if pid.isdigit(): return pid
  except IOError: pass
  
  # attempts to resolve using pidof, failing if:
  # - tor's running under a different name
  # - there's multiple instances of tor
  try:
    results = sysTools.call("pidof tor")
    if len(results) == 1 and len(results[0].split()) == 1:
      pid = results[0].strip()
      if pid.isdigit(): return pid
  except IOError: pass
  
  # attempts to resolve using netstat, failing if:
  # - tor's being run as a different user due to permissions
  try:
    results = sysTools.call("netstat -npl | grep 127.0.0.1:%i" % controlPort)
    
    if len(results) == 1:
      results = results[0].split()[6] # process field (ex. "7184/tor")
      pid = results[:results.find("/")]
      if pid.isdigit(): return pid
  except IOError: pass
  
  # attempts to resolve using ps, failing if:
  # - tor's running under a different name
  # - there's multiple instances of tor
  try:
    results = sysTools.call("ps -o pid -C tor")
    if len(results) == 2:
      pid = results[1].strip()
      if pid.isdigit(): return pid
  except IOError: pass
  
  # attempts to resolve using sockstat, failing if:
  # - sockstat doesn't accept the -4 flag (BSD only)
  # - tor is running under a different name
  # - there are multiple instances of Tor, using the
  #   same control port on different addresses.
  # 
  # TODO: the latter two issues could be solved by filtering for the control
  # port IP address instead of the process name.
  try:
    results = sysTools.call("sockstat -4l -P tcp -p %i | grep tor" % controlPort)
    if len(results) == 1 and len(results[0].split()) == 7:
      pid = results[0].split()[2]
      if pid.isdigit(): return pid
  except IOError: pass
  
  return None
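
A standalone sketch of the pgrep step above: it only yields a pid when exactly one process matches the name, since pgrep alone can't tell multiple tor instances apart.

import subprocess

def pidViaPgrep(processName="tor"):
    try:
        results = subprocess.check_output(["pgrep", "-x", processName]).decode().split()
    except (OSError, subprocess.CalledProcessError):
        return None  # pgrep missing, or no matching process
    if len(results) == 1 and results[0].isdigit():
        return results[0]
    return None  # multiple instances

print(pidViaPgrep("tor"))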
Code example #21
File: connections.py  Project: JustMe23/arm
 def _queryApplications(self, ports=[]):
   """
   Performs an lsof lookup on the given ports to get the command/pid tuples.
   
   Arguments:
     ports - list of ports to be resolved to applications
   """
   
   # atagar@fenrir:~/Desktop/arm$ lsof -i tcp:51849 -i tcp:37277
   # COMMAND  PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
   # tor     2001 atagar   14u  IPv4  14048      0t0  TCP localhost:9051->localhost:37277 (ESTABLISHED)
   # tor     2001 atagar   15u  IPv4  22024      0t0  TCP localhost:9051->localhost:51849 (ESTABLISHED)
   # python  2462 atagar    3u  IPv4  14047      0t0  TCP localhost:37277->localhost:9051 (ESTABLISHED)
   # python  3444 atagar    3u  IPv4  22023      0t0  TCP localhost:51849->localhost:9051 (ESTABLISHED)
   
   if not ports:
     self.resultsLock.acquire()
     self.queryResults = {}
     self.isResolving = False
     self.resultsLock.release()
     
     # wakes threads waiting on results
     self._cond.acquire()
     self._cond.notifyAll()
     self._cond.release()
     
     return
   
   results = {}
   lsofArgs = []
   
   # Uses results from the last query if we have any, otherwise appends the
   # port to the lsof command. This has the potential for persisting dirty
   # results but if we're querying by the dynamic port on the local tcp
   # connections then this should be very rare (and definitely worth the
   # chance of being able to skip an lsof query altogether).
   for port in ports:
     if port in self.queryResults:
       results[port] = self.queryResults[port]
     else: lsofArgs.append("-i tcp:%s" % port)
   
   if lsofArgs:
     lsofResults = sysTools.call("lsof -nP " + " ".join(lsofArgs))
   else: lsofResults = None
   
   if not lsofResults and self.failureCount != -1:
     # lsof query failed and we aren't yet sure if it's possible to
     # successfully get results on this platform
     self.failureCount += 1
     self.isResolving = False
     return
   elif lsofResults:
     # (iPort, oPort) tuple for our own process, if it was fetched
     ourConnection = None
     
     for line in lsofResults:
       lineComp = line.split()
       
       if len(lineComp) == 10 and lineComp[9] == "(ESTABLISHED)":
         cmd, pid, _, _, _, _, _, _, portMap, _ = lineComp
         
         if "->" in portMap:
           iPort, oPort = portMap.split("->")
           iPort = iPort.split(":")[1]
           oPort = oPort.split(":")[1]
           
           # entry belongs to our own process
           if pid == str(os.getpid()):
             cmd = self.scriptName
             ourConnection = (iPort, oPort)
           
           if iPort.isdigit() and oPort.isdigit():
             newEntry = (iPort, oPort, cmd, pid)
             
             # adds the entry under the key of whatever we queried it with
             # (this might be both the inbound _and_ outbound ports)
             for portMatch in (iPort, oPort):
               if portMatch in ports:
                 if portMatch in results:
                   results[portMatch].append(newEntry)
                 else: results[portMatch] = [newEntry]
     
      # the lsof call itself generated an extraneous 'sh' entry for our own
      # connection, so drop it
     if ourConnection:
       for ourPort in ourConnection:
         if ourPort in results:
           shIndex = None
           
           for i in range(len(results[ourPort])):
             if results[ourPort][i][2] == "sh":
               shIndex = i
               break
           
           if shIndex != None:
             del results[ourPort][shIndex]
   
   self.resultsLock.acquire()
   self.failureCount = -1
   self.queryResults = results
   self.isResolving = False
   self.resultsLock.release()
   
   # wakes threads waiting on results
   self._cond.acquire()
   self._cond.notifyAll()
   self._cond.release()
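
The per-line lsof parsing in _queryApplications can be looked at in isolation from the locking and caching. parseLsofLine below is a hypothetical helper (not part of arm), assuming the ten-column 'lsof -nP' layout shown in the sample output at the top of the method:

def parseLsofLine(line):
  """
  Parses an established tcp line from 'lsof -nP' output into a
  (localPort, remotePort, command, pid) tuple, returning None if the line
  doesn't match the expected ten-column layout.
  """
  
  comp = line.split()
  if len(comp) != 10 or comp[9] != "(ESTABLISHED)": return None
  
  cmd, pid, portMap = comp[0], comp[1], comp[8]
  if "->" not in portMap: return None
  
  localAddr, remoteAddr = portMap.split("->", 1)
  localPort, remotePort = localAddr.split(":")[-1], remoteAddr.split(":")[-1]
  
  if localPort.isdigit() and remotePort.isdigit():
    return (localPort, remotePort, cmd, pid)
  else: return None

# using the first tor line from the sample output above, this would return
# ('9051', '37277', 'tor', '2001')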
Code Example #22
0
File: connections.py Project: JustMe23/arm
 """
 
 if resolutionCmd == Resolver.PROC:
   # Attempts resolution via checking the proc contents.
   if not processPid:
     raise ValueError("proc resolution requires a pid")
   
   try:
     return procTools.getConnections(processPid)
   except Exception, exc:
     raise IOError(str(exc))
 else:
   # Queries a resolution utility (netstat, lsof, etc). This raises an
   # IOError if the command fails or isn't available.
   cmd = getResolverCommand(resolutionCmd, processName, processPid)
   results = sysTools.call(cmd)
   
   if not results: raise IOError("No results found using: %s" % cmd)
   
   # parses results for the resolution command
   conn = []
   for line in results:
     if resolutionCmd == Resolver.LSOF:
        # Different versions of lsof have different numbers of columns, so we
        # strip off the optional 'established' entry and just use the last
        # column.
       comp = line.replace("(ESTABLISHED)", "").strip().split()
     else: comp = line.split()
     
     if resolutionCmd == Resolver.NETSTAT:
       localIp, localPort = comp[3].split(":")
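
The excerpt stops mid-parse. As a rough illustration of what the remaining netstat handling involves (not arm's actual code; the Connection namedtuple and the column positions are assumptions based on typical 'netstat -np' output):

import collections

# hypothetical container for one parsed connection entry
Connection = collections.namedtuple("Connection", ["localIp", "localPort", "foreignIp", "foreignPort"])

def parseNetstatLine(line):
  """
  Parses a tcp line of 'netstat -np' output into a Connection, returning None
  for headers and lines without the expected address columns.
  """
  
  comp = line.split()
  if len(comp) < 5 or not comp[0].startswith("tcp"): return None
  
  try:
    localIp, localPort = comp[3].rsplit(":", 1)
    foreignIp, foreignPort = comp[4].rsplit(":", 1)
  except ValueError:
    return None
  
  return Connection(localIp, localPort, foreignIp, foreignPort)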
Code Example #23
0
File: headerPanel.py Project: zhou-kz/arm
    def _update(self, setStatic=False):
        """
        Updates stats in the vals mapping. By default this just revises volatile
        attributes.
        
        Arguments:
          setStatic - resets all parameters, including relatively static values
        """

        self.valsLock.acquire()
        conn = torTools.getConn()

        if setStatic:
            # version is truncated to first part, for instance:
            # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
            self.vals["tor/version"] = conn.getInfo("version",
                                                    "Unknown").split()[0]
            self.vals["tor/versionStatus"] = conn.getInfo(
                "status/version/current", "Unknown")
            self.vals["tor/nickname"] = conn.getOption("Nickname", "")
            self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
            self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
            self.vals["tor/controlPort"] = conn.getOption("ControlPort", "")
            self.vals["tor/isAuthPassword"] = conn.getOption(
                "HashedControlPassword") != None
            self.vals["tor/isAuthCookie"] = conn.getOption(
                "CookieAuthentication") == "1"

            # orport is reported as zero if unset
            if self.vals["tor/orPort"] == "0": self.vals["tor/orPort"] = ""

            # overwrite address if ORListenAddress is set (and possibly orPort too)
            self.vals["tor/address"] = "Unknown"
            listenAddr = conn.getOption("ORListenAddress")
            if listenAddr:
                if ":" in listenAddr:
                    # both ip and port overwritten
                    self.vals["tor/address"] = listenAddr[:listenAddr.find(":"
                                                                           )]
                    self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") +
                                                         1:]
                else:
                    self.vals["tor/address"] = listenAddr

            # fetch exit policy (might span over multiple lines)
            policyEntries = []
            for exitPolicy in conn.getOption("ExitPolicy", [], True):
                policyEntries += [
                    policy.strip() for policy in exitPolicy.split(",")
                ]
            self.vals["tor/exitPolicy"] = ", ".join(policyEntries)

            # system information
            unameVals = os.uname()
            self.vals["sys/hostname"] = unameVals[1]
            self.vals["sys/os"] = unameVals[0]
            self.vals["sys/version"] = unameVals[2]

            pid = conn.getMyPid()
            self.vals["stat/pid"] = pid if pid else ""

            # reverts volatile parameters to defaults
            self.vals["tor/fingerprint"] = "Unknown"
            self.vals["tor/flags"] = []
            self.vals["stat/%torCpu"] = "0"
            self.vals["stat/%armCpu"] = "0"
            self.vals["stat/rss"] = "0"
            self.vals["stat/%mem"] = "0"
            self.vals["stat/etime"] = ""

        # sets volatile parameters
        volatile = {}

        # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
        # events. Introduce caching via torTools?
        if self.vals["tor/address"] == "Unknown":
            volatile["tor/address"] = conn.getInfo("address",
                                                   self.vals["tor/address"])

        volatile["tor/fingerprint"] = conn.getInfo(
            "fingerprint", self.vals["tor/fingerprint"])
        volatile["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])

        # ps derived stats
        psParams = ["%cpu", "rss", "%mem", "etime"]
        if self.vals["stat/pid"]:
            # if the call fails then everything except etime is zeroed out
            # (most likely tor's no longer running)
            volatile["stat/%torCpu"] = "0"
            volatile["stat/rss"] = "0"
            volatile["stat/%mem"] = "0"

            # the ps call formats results as:
            # %CPU   RSS %MEM     ELAPSED
            # 0.3 14096  1.3       29:51
            psRate = self._config["queries.ps.rate"]
            psCall = sysTools.call(
                "ps -p %s -o %s" % (self.vals["stat/pid"], ",".join(psParams)),
                psRate, True)

            if psCall and len(psCall) >= 2:
                stats = psCall[1].strip().split()

                if len(stats) == len(psParams):
                    for i in range(len(psParams)):
                        paramName = psParams[i]
                        if paramName == "%cpu": paramName = "%torCpu"
                        volatile["stat/" + paramName] = stats[i]

        # determines the cpu time for the arm process (including user and system
        # time of both the primary and child processes)

        currentTime, totalCpuTime = time.time(), sum(os.times()[:3])
        cpuDelta = totalCpuTime - self._armCpuSampling[0]
        timeDelta = currentTime - self._armCpuSampling[1]
        self.vals["stat/%armCpu"] = "%0.1f" % (100 * cpuDelta / timeDelta)
        self._armCpuSampling = (totalCpuTime, currentTime)

        # checks if any changes have been made and merges volatile into vals

        self._isChanged |= setStatic
        for key, val in volatile.items():
            self._isChanged |= self.vals[key] != val
            self.vals[key] = val

        self._lastUpdate = currentTime
        self.valsLock.release()
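
The arm cpu percentage near the end comes from comparing successive os.times() samples against wall clock time. A minimal sketch of just that sampling step outside the panel (the CpuSampler name is an assumption), mirroring the cpuDelta / timeDelta calculation above:

import os, time

class CpuSampler:
  """
  Reports this process's cpu usage since the previous sample as a percentage
  of wall clock time, summing the user, system, and child user times from
  os.times() just as the panel does.
  """
  
  def __init__(self):
    self.lastCpuTotal = sum(os.times()[:3])
    self.lastSampleTime = time.time()
  
  def sample(self):
    cpuTotal, now = sum(os.times()[:3]), time.time()
    cpuDelta, timeDelta = cpuTotal - self.lastCpuTotal, now - self.lastSampleTime
    self.lastCpuTotal, self.lastSampleTime = cpuTotal, now
    
    if timeDelta <= 0: return 0.0
    return 100 * cpuDelta / timeDelta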
Code Example #24
0
    def prepopulateFromState(self):
        """
        Attempts to use tor's state file to prepopulate values for the 15 minute
        interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
        returns True if successful and False otherwise.
        """

        # checks that this is a relay (if ORPort is unset, then skip)
        conn = torTools.getConn()
        orPort = conn.getOption("ORPort")
        if orPort == "0": return

        # gets the uptime (using the same parameters as the header panel to
        # take advantage of caching)
        uptime = None
        queryPid = conn.getMyPid()
        if queryPid:
            queryParam = ["%cpu", "rss", "%mem", "etime"]
            queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
            psCall = sysTools.call(queryCmd, 3600, True)

            if psCall and len(psCall) == 2:
                stats = psCall[1].strip().split()
                if len(stats) == 4: uptime = stats[3]

        # checks if tor has been running for at least a day, the reason being that
        # the state tracks a day's worth of data and this should only prepopulate
        # results associated with this tor instance
        if not uptime or not "-" in uptime:
            msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
            log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
            return False

        # get the user's data directory (usually '~/.tor')
        dataDir = conn.getOption("DataDirectory")
        if not dataDir:
            msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
            log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
            return False

        # attempt to open the state file
        try:
            stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir),
                             "r")
        except IOError:
            msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
            log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
            return False

        # get the BWHistory entries (ordered oldest to newest) and number of
        # intervals since last recorded
        bwReadEntries, bwWriteEntries = None, None
        missingReadEntries, missingWriteEntries = None, None
        lastReadTime, lastWriteTime = None, None

        # converts from gmt to local with respect to DST
        tz_offset = time.altzone if time.localtime()[8] else time.timezone

        for line in stateFile:
            line = line.strip()

            # According to the rep_hist_update_state() function the BWHistory*Ends
            # correspond to the start of the following sampling period. Also, the
            # most recent values of BWHistory*Values appear to be an incremental
            # counter for the current sampling period. Hence, offsets are added to
            # account for both.

            if line.startswith("BWHistoryReadValues"):
                bwReadEntries = line[20:].split(",")
                bwReadEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwReadEntries
                ]
                bwReadEntries.pop()
            elif line.startswith("BWHistoryWriteValues"):
                bwWriteEntries = line[21:].split(",")
                bwWriteEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwWriteEntries
                ]
                bwWriteEntries.pop()
            elif line.startswith("BWHistoryReadEnds"):
                lastReadTime = time.mktime(
                    time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastReadTime -= 900
                missingReadEntries = int((time.time() - lastReadTime) / 900)
            elif line.startswith("BWHistoryWriteEnds"):
                lastWriteTime = time.mktime(
                    time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastWriteTime -= 900
                missingWriteEntries = int((time.time() - lastWriteTime) / 900)

        if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
            msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
            log.log(self._config["log.graph.bw.prepopulateFailure"], msg)
            return False

        # fills missing entries with the last value
        bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
        bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries

        # crops starting entries so they're the same size
        entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
        bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
        bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]

        # gets index for 15-minute interval
        intervalIndex = 0
        for indexEntry in graphPanel.UPDATE_INTERVALS:
            if indexEntry[1] == 900: break
            else: intervalIndex += 1

        # fills the graphing parameters with state information
        for i in range(entryCount):
            readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]

            self.lastPrimary, self.lastSecondary = readVal, writeVal

            self.prepopulatePrimaryTotal += readVal * 900
            self.prepopulateSecondaryTotal += writeVal * 900
            self.prepopulateTicks += 900

            self.primaryCounts[intervalIndex].insert(0, readVal)
            self.secondaryCounts[intervalIndex].insert(0, writeVal)

        self.maxPrimary[intervalIndex] = max(self.primaryCounts[intervalIndex])
        self.maxSecondary[intervalIndex] = max(self.secondaryCounts[intervalIndex])
        del self.primaryCounts[intervalIndex][self.maxCol + 1:]
        del self.secondaryCounts[intervalIndex][self.maxCol + 1:]

        msg = PREPOPULATE_SUCCESS_MSG
        missingSec = time.time() - min(lastReadTime, lastWriteTime)
        if missingSec:
            msg += " (%s is missing)" % uiTools.getTimeLabel(
                missingSec, 0, True)
        log.log(self._config["log.graph.bw.prepopulateSuccess"], msg)

        return True
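
For reference, the state file parsing can be separated from the graph bookkeeping. parseBwHistory is a hypothetical helper (not part of arm) assuming the same 900 second sampling interval and bytes-to-KB/s conversion used above:

def parseBwHistory(stateFilePath):
  """
  Pulls the BWHistoryReadValues and BWHistoryWriteValues entries from a tor
  state file, converting each from bytes per 900 second interval to KB/s and
  dropping the trailing entry (the counter for the still-open interval).
  Returns a dict with 'read' and 'write' lists, either of which may be None.
  """
  
  history = {"read": None, "write": None}
  
  with open(stateFilePath) as stateFile:
    for line in stateFile:
      line = line.strip()
      
      for key, prefix in (("read", "BWHistoryReadValues "), ("write", "BWHistoryWriteValues ")):
        if line.startswith(prefix):
          entries = [int(entry) / 1024.0 / 900 for entry in line[len(prefix):].split(",")]
          entries.pop() # drops the in-progress interval's counter
          history[key] = entries
  
  return history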