Ejemplo n.º 1
0
    def getListingEntry(self, width, currentTime, listingType):
        """
        Provides the tuple list for this connection's listing. Lines have the
        form:
          <src>  -->  <dst>     <etc>     <uptime> (<type>)

        The src/dst/etc components vary with the listingType (IP_ADDRESS,
        HOSTNAME, FINGERPRINT, or NICKNAME).

        Arguments:
          width       - maximum length of the line
          currentTime - unix timestamp for what the results should consider to
                        be the current time
          listingType - primary attribute we're listing connections by
        """

        # start from the (most likely cached) entry of the parent class
        displayEntry = entries.ConnectionPanelLine.getListingEntry(
            self, width, currentTime, listingType)

        # optionally flag connections that were established before we started
        if CONFIG["features.connection.markInitialConnections"]:
            prefix = "+" if self.isInitialConnection else " "
        else:
            prefix = ""

        # swap in a freshly computed uptime label, keeping the entry's
        # remaining attributes as-is
        elapsed = currentTime - self.startTime
        uptimeLabel = prefix + "%5s" % str_tools.get_time_label(elapsed, 1)
        displayEntry[2] = (uptimeLabel, displayEntry[2][1])

        return displayEntry
Ejemplo n.º 2
0
 def getListingEntry(self, width, currentTime, listingType):
   """
   Provides the tuple list for this connection's listing. Lines have the form:
     <src>  -->  <dst>     <etc>     <uptime> (<type>)
   
   The src/dst/etc components depend on the listingType (IP_ADDRESS,
   HOSTNAME, FINGERPRINT, or NICKNAME).
   
   Arguments:
     width       - maximum length of the line
     currentTime - unix timestamp for what the results should consider to be
                   the current time
     listingType - primary attribute we're listing connections by
   """
   
   # cached base entry from the parent class
   listingEntry = entries.ConnectionPanelLine.getListingEntry(self, width, currentTime, listingType)
   
   # marker flagging pre-existing connections, when that feature's enabled
   if CONFIG["features.connection.markInitialConnections"]:
     marker = "+" if self.isInitialConnection else " "
   else:
     marker = ""
   
   # replace the uptime component with an up to date label
   elapsed = currentTime - self.startTime
   listingEntry[2] = (marker + "%5s" % str_tools.get_time_label(elapsed, 1), listingEntry[2][1])
   
   return listingEntry
Ejemplo n.º 3
0
  def test_get_time_label(self):
    """
    Checks the get_time_label() function.
    """

    # test the pydoc examples
    # (assertEqual rather than the deprecated assertEquals alias)
    self.assertEqual('2h', str_tools.get_time_label(10000))
    self.assertEqual('1.0 minute', str_tools.get_time_label(61, 1, True))
    self.assertEqual('1.01 minutes', str_tools.get_time_label(61, 2, True))

    # boundary durations: zero (with and without precision) and negative
    self.assertEqual('0s', str_tools.get_time_label(0))
    self.assertEqual('0 seconds', str_tools.get_time_label(0, is_long = True))
    self.assertEqual('0.00s', str_tools.get_time_label(0, 2))
    self.assertEqual('-10s', str_tools.get_time_label(-10))

    # non-numeric input is rejected with a TypeError
    self.assertRaises(TypeError, str_tools.get_time_label, None)
    self.assertRaises(TypeError, str_tools.get_time_label, 'hello world')
Ejemplo n.º 4
0
  def test_get_time_label(self):
    """
    Checks the get_time_label() function.
    """

    # test the pydoc examples
    # (assertEqual rather than the deprecated assertEquals alias)
    self.assertEqual('2h', str_tools.get_time_label(10000))
    self.assertEqual('1.0 minute', str_tools.get_time_label(61, 1, True))
    self.assertEqual('1.01 minutes', str_tools.get_time_label(61, 2, True))

    # boundary durations: zero (with and without precision) and negative
    self.assertEqual('0s', str_tools.get_time_label(0))
    self.assertEqual('0 seconds', str_tools.get_time_label(0, is_long = True))
    self.assertEqual('0.00s', str_tools.get_time_label(0, 2))
    self.assertEqual('-10s', str_tools.get_time_label(-10))

    # non-numeric input is rejected with a TypeError
    self.assertRaises(TypeError, str_tools.get_time_label, None)
    self.assertRaises(TypeError, str_tools.get_time_label, 'hello world')
Ejemplo n.º 5
0
 def _getValue(self):
   """
   Provides the current value of the configuration entry, taking advantage of
   the torTools caching to effectively query the accurate value. This uses the
   value's type to provide a user friendly representation if able.
   """
   
   conn = torTools.getConn()
   value = ", ".join(conn.getOption(self.get(Field.OPTION), [], True))
   
   # substitutes friendlier output for recognized types
   if value == "":
     value = "<none>"
   elif self.get(Field.TYPE) == "Boolean" and value in ("0", "1"):
     value = "True" if value == "1" else "False"
   elif self.get(Field.TYPE) == "DataSize" and value.isdigit():
     value = str_tools.get_size_label(int(value))
   elif self.get(Field.TYPE) == "TimeInterval" and value.isdigit():
     value = str_tools.get_time_label(int(value), is_long = True)
   
   return value
Ejemplo n.º 6
0
    def _getValue(self):
        """
        Provides the current value of the configuration entry, taking
        advantage of the torTools caching to effectively query the accurate
        value. This uses the value's type to provide a user friendly
        representation if able.
        """

        option = self.get(Field.OPTION)
        result = ", ".join(torTools.getConn().getOption(option, [], True))

        # unset options get a placeholder rather than an empty string
        if not result:
            return "<none>"

        # provides nicer values for recognized types
        entryType = self.get(Field.TYPE)
        if entryType == "Boolean" and result in ("0", "1"):
            result = "True" if result == "1" else "False"
        elif entryType == "DataSize" and result.isdigit():
            result = str_tools.get_size_label(int(result))
        elif entryType == "TimeInterval" and result.isdigit():
            result = str_tools.get_time_label(int(result), is_long=True)

        return result
Ejemplo n.º 7
0
 def prepopulateFromState(self):
   """
   Attempts to use tor's state file to prepopulate values for the 15 minute
   interval via the BWHistoryReadValues/BWHistoryWriteValues values. This
   returns True if successful and False otherwise.
   """
   
   # checks that this is a relay (if ORPort is unset, then skip)
   conn = torTools.getConn()
   orPort = conn.getOption("ORPort", None)
   if orPort == "0": return False  # False rather than None, as documented
   
   # gets the uptime (using the same parameters as the header panel to take
   # advantage of caching)
   # TODO: stem dropped system caching support so we'll need to think of
   # something else
   uptime = None
   queryPid = conn.getMyPid()
   if queryPid:
     queryParam = ["%cpu", "rss", "%mem", "etime"]
     queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
     psCall = system.call(queryCmd, None)
     
     if psCall and len(psCall) == 2:
       stats = psCall[1].strip().split()
       if len(stats) == 4: uptime = stats[3]
   
   # checks if tor has been running for at least a day, the reason being that
   # the state tracks a day's worth of data and this should only prepopulate
   # results associated with this tor instance (ps shows "days-hh:mm:ss" only
   # past the one day mark, hence the dash check)
   if not uptime or not "-" in uptime:
     msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
     log.notice(msg)
     return False
   
   # get the user's data directory (usually '~/.tor')
   dataDir = conn.getOption("DataDirectory", None)
   if not dataDir:
     msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
     log.notice(msg)
     return False
   
   # attempt to open the state file
   try: stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir), "r")
   except IOError:
     msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
     log.notice(msg)
     return False
   
   # get the BWHistory entries (ordered oldest to newest) and number of
   # intervals since last recorded
   bwReadEntries, bwWriteEntries = None, None
   missingReadEntries, missingWriteEntries = None, None
   
   # initialized in case the state file lacks the BWHistory*Ends lines,
   # otherwise the sanity check after the loop would raise a NameError
   lastReadTime, lastWriteTime = None, None
   
   # converts from gmt to local with respect to DST
   tz_offset = time.altzone if time.localtime()[8] else time.timezone
   
   for line in stateFile:
     line = line.strip()
     
     # According to the rep_hist_update_state() function the BWHistory*Ends
     # correspond to the start of the following sampling period. Also, the
     # most recent values of BWHistory*Values appear to be an incremental
     # counter for the current sampling period. Hence, offsets are added to
     # account for both.
     
     if line.startswith("BWHistoryReadValues"):
       bwReadEntries = line[20:].split(",")
       bwReadEntries = [int(entry) / 1024.0 / 900 for entry in bwReadEntries]
       bwReadEntries.pop()  # drops the in-progress sampling period
     elif line.startswith("BWHistoryWriteValues"):
       bwWriteEntries = line[21:].split(",")
       bwWriteEntries = [int(entry) / 1024.0 / 900 for entry in bwWriteEntries]
       bwWriteEntries.pop()
     elif line.startswith("BWHistoryReadEnds"):
       lastReadTime = time.mktime(time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastReadTime -= 900
       missingReadEntries = int((time.time() - lastReadTime) / 900)
     elif line.startswith("BWHistoryWriteEnds"):
       lastWriteTime = time.mktime(time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
       lastWriteTime -= 900
       missingWriteEntries = int((time.time() - lastWriteTime) / 900)
   
   stateFile.close()  # previously leaked
   
   if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
     msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
     log.notice(msg)
     return False
   
   # fills missing entries with the last value
   bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
   bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries
   
   # crops starting entries so they're the same size
   entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
   bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
   bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]
   
   # gets index for 15-minute interval
   intervalIndex = 0
   for indexEntry in graphPanel.UPDATE_INTERVALS:
     if indexEntry[1] == 900: break
     else: intervalIndex += 1
   
   # fills the graphing parameters with state information
   for i in range(entryCount):
     readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]
     
     self.lastPrimary, self.lastSecondary = readVal, writeVal
     
     self.prepopulatePrimaryTotal += readVal * 900
     self.prepopulateSecondaryTotal += writeVal * 900
     self.prepopulateTicks += 900
     
     self.primaryCounts[intervalIndex].insert(0, readVal)
     self.secondaryCounts[intervalIndex].insert(0, writeVal)
   
   # maxima are over this interval's samples rather than the outer list of
   # per-interval sample lists (draw() calls int() on these, so they must be
   # numeric)
   self.maxPrimary[intervalIndex] = max(self.primaryCounts[intervalIndex])
   self.maxSecondary[intervalIndex] = max(self.secondaryCounts[intervalIndex])
   del self.primaryCounts[intervalIndex][self.maxCol + 1:]
   del self.secondaryCounts[intervalIndex][self.maxCol + 1:]
   
   msg = PREPOPULATE_SUCCESS_MSG
   missingSec = time.time() - min(lastReadTime, lastWriteTime)
   if missingSec: msg += " (%s is missing)" % str_tools.get_time_label(missingSec, 0, True)
   log.notice(msg)
   
   return True
Ejemplo n.º 8
0
def validate(contents=None):
    """
  Performs validation on the given torrc contents, providing back a listing of
  (line number, issue, msg) tuples for issues found. If the issue occurs on a
  multi-line torrc entry then the line number is for the last line of the
  entry.
  
  Arguments:
    contents - torrc contents
  """

    conn = torTools.getConn()
    customOptions = getCustomOptions()
    issuesFound, seenOptions = [], []

    # Strips comments and collapses multi-line entries, for more
    # information see:
    # https://trac.torproject.org/projects/tor/ticket/1929
    strippedContents, multilineBuffer = [], ""
    for line in _stripComments(contents):
        if not line: strippedContents.append("")
        else:
            line = multilineBuffer + line
            multilineBuffer = ""

            # entries ending with a backslash continue on the following line;
            # blanks are kept so indices still match torrc line numbers
            if line.endswith("\\"):
                multilineBuffer = line[:-1]
                strippedContents.append("")
            else:
                strippedContents.append(line.strip())

    # NOTE(review): iterates bottom up — presumably so the later (winning)
    # occurrence of a duplicated option is the one kept; confirm before
    # relying on the ordering
    for lineNumber in range(len(strippedContents) - 1, -1, -1):
        lineText = strippedContents[lineNumber]
        if not lineText: continue

        # first whitespace-separated token is the option, the rest its value
        lineComp = lineText.split(None, 1)
        if len(lineComp) == 2: option, value = lineComp
        else: option, value = lineText, ""

        # Tor is case insensitive when parsing its torrc. This poses a bit of
        # an issue for us because we want all of our checks to be case
        # insensitive too but also want messages to match the normal
        # camel-case conventions.
        #
        # Using the customOptions to account for this. It contains the tor
        # reported options (camel case) and is either a matching set or the
        # following default value check will fail. Hence using that hash to
        # correct the case.
        #
        # TODO: when refactoring for stem make this less confusing...

        for customOpt in customOptions:
            if customOpt.lower() == option.lower():
                option = customOpt
                break

        # if an aliased option then use its real name
        if option in CONFIG["torrc.alias"]:
            option = CONFIG["torrc.alias"][option]

        # most parameters are overwritten if defined multiple times
        if option in seenOptions and not option in getMultilineParameters():
            issuesFound.append((lineNumber, ValidationError.DUPLICATE, option))
            continue
        else:
            seenOptions.append(option)

        # checks if the value isn't necessary due to matching the defaults
        if not option in customOptions:
            issuesFound.append(
                (lineNumber, ValidationError.IS_DEFAULT, option))

        # replace aliases with their recognized representation
        # NOTE(review): aliases were already expanded above, so this repeat is
        # a no-op unless aliases chain — confirm whether it's intentional
        if option in CONFIG["torrc.alias"]:
            option = CONFIG["torrc.alias"][option]

        # tor appears to replace tabs with a space, for instance:
        # "accept\t*:563" is read back as "accept *:563"
        value = value.replace("\t", " ")

        # parse value if it's a size or time, expanding the units
        value, valueType = _parseConfValue(value)

        # issues GETCONF to get the values tor's currently configured to use
        torValues = conn.getOption(option, [], True)

        # multiline entries can be comma separated values (for both tor and
        # the torrc), so normalize both sides to stripped single entries
        valueList = [value]
        if option in getMultilineParameters():
            valueList = [val.strip() for val in value.split(",")]

            fetchedValues, torValues = torValues, []
            for fetchedValue in fetchedValues:
                for fetchedEntry in fetchedValue.split(","):
                    fetchedEntry = fetchedEntry.strip()
                    if not fetchedEntry in torValues:
                        torValues.append(fetchedEntry)

        for val in valueList:
            # checks if both the argument and tor's value are empty
            isBlankMatch = not val and not torValues

            if not isBlankMatch and not val in torValues:
                # converts corrections to reader friendly size/time values
                displayValues = torValues
                if valueType == ValueType.SIZE:
                    displayValues = [
                        str_tools.get_size_label(int(val)) for val in torValues
                    ]
                elif valueType == ValueType.TIME:
                    displayValues = [
                        str_tools.get_time_label(int(val)) for val in torValues
                    ]

                issuesFound.append((lineNumber, ValidationError.MISMATCH,
                                    ", ".join(displayValues)))

    # checks if any custom options are missing from the torrc
    for option in customOptions:
        # In new versions the 'DirReqStatistics' option is true by default and
        # disabled on startup if geoip lookups are unavailable. If this option
        # is missing then that's most likely the reason.
        #
        # https://trac.torproject.org/projects/tor/ticket/4237

        if option == "DirReqStatistics": continue

        if not option in seenOptions:
            issuesFound.append((None, ValidationError.MISSING, option))

    return issuesFound
Ejemplo n.º 9
0
def validate(contents = None):
  """
  Performs validation on the given torrc contents, providing back a listing of
  (line number, issue, msg) tuples for issues found. If the issue occurs on a
  multi-line torrc entry then the line number is for the last line of the
  entry.
  
  Arguments:
    contents - torrc contents
  """
  
  conn = torTools.getConn()
  customOptions = getCustomOptions()
  issuesFound, seenOptions = [], []
  
  # Strips comments and collapses multi-line entries, for more
  # information see:
  # https://trac.torproject.org/projects/tor/ticket/1929
  strippedContents, multilineBuffer = [], ""
  for line in _stripComments(contents):
    if not line: strippedContents.append("")
    else:
      line = multilineBuffer + line
      multilineBuffer = ""
      
      # entries ending with a backslash continue on the following line; blanks
      # are kept so indices still match torrc line numbers
      if line.endswith("\\"):
        multilineBuffer = line[:-1]
        strippedContents.append("")
      else:
        strippedContents.append(line.strip())
  
  # NOTE(review): iterates bottom up — presumably so the later (winning)
  # occurrence of a duplicated option is the one kept; confirm before relying
  # on the ordering
  for lineNumber in range(len(strippedContents) - 1, -1, -1):
    lineText = strippedContents[lineNumber]
    if not lineText: continue
    
    # first whitespace-separated token is the option, the rest its value
    lineComp = lineText.split(None, 1)
    if len(lineComp) == 2: option, value = lineComp
    else: option, value = lineText, ""
    
    # Tor is case insensitive when parsing its torrc. This poses a bit of an
    # issue for us because we want all of our checks to be case insensitive
    # too but also want messages to match the normal camel-case conventions.
    #
    # Using the customOptions to account for this. It contains the tor reported
    # options (camel case) and is either a matching set or the following default
    # value check will fail. Hence using that hash to correct the case.
    #
    # TODO: when refactoring for stem make this less confusing...
    
    for customOpt in customOptions:
      if customOpt.lower() == option.lower():
        option = customOpt
        break
    
    # if an aliased option then use its real name
    if option in CONFIG["torrc.alias"]:
      option = CONFIG["torrc.alias"][option]
    
    # most parameters are overwritten if defined multiple times
    if option in seenOptions and not option in getMultilineParameters():
      issuesFound.append((lineNumber, ValidationError.DUPLICATE, option))
      continue
    else: seenOptions.append(option)
    
    # checks if the value isn't necessary due to matching the defaults
    if not option in customOptions:
      issuesFound.append((lineNumber, ValidationError.IS_DEFAULT, option))
    
    # replace aliases with their recognized representation
    # NOTE(review): aliases were already expanded above, so this repeat is a
    # no-op unless aliases chain — confirm whether it's intentional
    if option in CONFIG["torrc.alias"]:
      option = CONFIG["torrc.alias"][option]
    
    # tor appears to replace tabs with a space, for instance:
    # "accept\t*:563" is read back as "accept *:563"
    value = value.replace("\t", " ")
    
    # parse value if it's a size or time, expanding the units
    value, valueType = _parseConfValue(value)
    
    # issues GETCONF to get the values tor's currently configured to use
    torValues = conn.getOption(option, [], True)
    
    # multiline entries can be comma separated values (for both tor and the
    # torrc), so normalize both sides to stripped single entries
    valueList = [value]
    if option in getMultilineParameters():
      valueList = [val.strip() for val in value.split(",")]
      
      fetchedValues, torValues = torValues, []
      for fetchedValue in fetchedValues:
        for fetchedEntry in fetchedValue.split(","):
          fetchedEntry = fetchedEntry.strip()
          if not fetchedEntry in torValues:
            torValues.append(fetchedEntry)
    
    for val in valueList:
      # checks if both the argument and tor's value are empty
      isBlankMatch = not val and not torValues
      
      if not isBlankMatch and not val in torValues:
        # converts corrections to reader friendly size/time values
        displayValues = torValues
        if valueType == ValueType.SIZE:
          displayValues = [str_tools.get_size_label(int(val)) for val in torValues]
        elif valueType == ValueType.TIME:
          displayValues = [str_tools.get_time_label(int(val)) for val in torValues]
        
        issuesFound.append((lineNumber, ValidationError.MISMATCH, ", ".join(displayValues)))
  
  # checks if any custom options are missing from the torrc
  for option in customOptions:
    # In new versions the 'DirReqStatistics' option is true by default and
    # disabled on startup if geoip lookups are unavailable. If this option is
    # missing then that's most likely the reason.
    #
    # https://trac.torproject.org/projects/tor/ticket/4237
    
    if option == "DirReqStatistics": continue
    
    if not option in seenOptions:
      issuesFound.append((None, ValidationError.MISSING, option))
  
  return issuesFound
Ejemplo n.º 10
0
    def prepopulateFromState(self):
        """
        Attempts to use tor's state file to prepopulate values for the 15
        minute interval via the BWHistoryReadValues/BWHistoryWriteValues
        values. This returns True if successful and False otherwise.
        """

        # checks that this is a relay (if ORPort is unset, then skip)
        conn = torTools.getConn()
        orPort = conn.getOption("ORPort", None)
        if orPort == "0": return False  # False rather than None, as documented

        # gets the uptime (using the same parameters as the header panel to take
        # advantage of caching)
        # TODO: stem dropped system caching support so we'll need to think of
        # something else
        uptime = None
        queryPid = conn.getMyPid()
        if queryPid:
            queryParam = ["%cpu", "rss", "%mem", "etime"]
            queryCmd = "ps -p %s -o %s" % (queryPid, ",".join(queryParam))
            psCall = system.call(queryCmd, None)

            if psCall and len(psCall) == 2:
                stats = psCall[1].strip().split()
                if len(stats) == 4: uptime = stats[3]

        # checks if tor has been running for at least a day, the reason being
        # that the state tracks a day's worth of data and this should only
        # prepopulate results associated with this tor instance (ps shows
        # "days-hh:mm:ss" only past the one day mark, hence the dash check)
        if not uptime or not "-" in uptime:
            msg = PREPOPULATE_FAILURE_MSG % "insufficient uptime"
            log.notice(msg)
            return False

        # get the user's data directory (usually '~/.tor')
        dataDir = conn.getOption("DataDirectory", None)
        if not dataDir:
            msg = PREPOPULATE_FAILURE_MSG % "data directory not found"
            log.notice(msg)
            return False

        # attempt to open the state file
        try:
            stateFile = open("%s%s/state" % (conn.getPathPrefix(), dataDir),
                             "r")
        except IOError:
            msg = PREPOPULATE_FAILURE_MSG % "unable to read the state file"
            log.notice(msg)
            return False

        # get the BWHistory entries (ordered oldest to newest) and number of
        # intervals since last recorded
        bwReadEntries, bwWriteEntries = None, None
        missingReadEntries, missingWriteEntries = None, None

        # initialized in case the state file lacks the BWHistory*Ends lines,
        # otherwise the sanity check after the loop would raise a NameError
        lastReadTime, lastWriteTime = None, None

        # converts from gmt to local with respect to DST
        tz_offset = time.altzone if time.localtime()[8] else time.timezone

        for line in stateFile:
            line = line.strip()

            # According to the rep_hist_update_state() function the BWHistory*Ends
            # correspond to the start of the following sampling period. Also, the
            # most recent values of BWHistory*Values appear to be an incremental
            # counter for the current sampling period. Hence, offsets are added to
            # account for both.

            if line.startswith("BWHistoryReadValues"):
                bwReadEntries = line[20:].split(",")
                bwReadEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwReadEntries
                ]
                bwReadEntries.pop()  # drops the in-progress sampling period
            elif line.startswith("BWHistoryWriteValues"):
                bwWriteEntries = line[21:].split(",")
                bwWriteEntries = [
                    int(entry) / 1024.0 / 900 for entry in bwWriteEntries
                ]
                bwWriteEntries.pop()
            elif line.startswith("BWHistoryReadEnds"):
                lastReadTime = time.mktime(
                    time.strptime(line[18:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastReadTime -= 900
                missingReadEntries = int((time.time() - lastReadTime) / 900)
            elif line.startswith("BWHistoryWriteEnds"):
                lastWriteTime = time.mktime(
                    time.strptime(line[19:], "%Y-%m-%d %H:%M:%S")) - tz_offset
                lastWriteTime -= 900
                missingWriteEntries = int((time.time() - lastWriteTime) / 900)

        stateFile.close()  # previously leaked

        if not bwReadEntries or not bwWriteEntries or not lastReadTime or not lastWriteTime:
            msg = PREPOPULATE_FAILURE_MSG % "bandwidth stats missing from state file"
            log.notice(msg)
            return False

        # fills missing entries with the last value
        bwReadEntries += [bwReadEntries[-1]] * missingReadEntries
        bwWriteEntries += [bwWriteEntries[-1]] * missingWriteEntries

        # crops starting entries so they're the same size
        entryCount = min(len(bwReadEntries), len(bwWriteEntries), self.maxCol)
        bwReadEntries = bwReadEntries[len(bwReadEntries) - entryCount:]
        bwWriteEntries = bwWriteEntries[len(bwWriteEntries) - entryCount:]

        # gets index for 15-minute interval
        intervalIndex = 0
        for indexEntry in graphPanel.UPDATE_INTERVALS:
            if indexEntry[1] == 900: break
            else: intervalIndex += 1

        # fills the graphing parameters with state information
        for i in range(entryCount):
            readVal, writeVal = bwReadEntries[i], bwWriteEntries[i]

            self.lastPrimary, self.lastSecondary = readVal, writeVal

            self.prepopulatePrimaryTotal += readVal * 900
            self.prepopulateSecondaryTotal += writeVal * 900
            self.prepopulateTicks += 900

            self.primaryCounts[intervalIndex].insert(0, readVal)
            self.secondaryCounts[intervalIndex].insert(0, writeVal)

        # maxima are over this interval's samples rather than the outer list
        # of per-interval sample lists (draw() calls int() on these, so they
        # must be numeric)
        self.maxPrimary[intervalIndex] = max(self.primaryCounts[intervalIndex])
        self.maxSecondary[intervalIndex] = max(self.secondaryCounts[intervalIndex])
        del self.primaryCounts[intervalIndex][self.maxCol + 1:]
        del self.secondaryCounts[intervalIndex][self.maxCol + 1:]

        msg = PREPOPULATE_SUCCESS_MSG
        missingSec = time.time() - min(lastReadTime, lastWriteTime)
        if missingSec:
            msg += " (%s is missing)" % str_tools.get_time_label(
                missingSec, 0, True)
        log.notice(msg)

        return True
Ejemplo n.º 11
0
 def draw(self, width, height):
   """
   Redraws the graph panel: title, top labels, upper/lower/intermediate
   bounds, the two bar graphs (primary left, secondary right), and x-axis
   time labels. Finally delegates to the current stats' own draw().
   
   Arguments:
     width  - available drawing width
     height - available drawing height
   """
   
   if self.currentDisplay:
     param = self.getAttr("stats")[self.currentDisplay]
     # NOTE(review): '/' here relies on Python 2 integer division — under
     # Python 3 this would yield a float; confirm the target interpreter
     graphCol = min((width - 10) / 2, param.maxCol)
     
     primaryColor = uiTools.getColor(param.getColor(True))
     secondaryColor = uiTools.getColor(param.getColor(False))
     
     if self.isTitleVisible(): self.addstr(0, 0, param.getTitle(width), curses.A_STANDOUT)
     
     # top labels
     left, right = param.getHeaderLabel(width / 2, True), param.getHeaderLabel(width / 2, False)
     if left: self.addstr(1, 0, left, curses.A_BOLD | primaryColor)
     if right: self.addstr(1, graphCol + 5, right, curses.A_BOLD | secondaryColor)
     
     # determines max/min value on the graph
     if self.bounds == Bounds.GLOBAL_MAX:
       primaryMaxBound = int(param.maxPrimary[self.updateInterval])
       secondaryMaxBound = int(param.maxSecondary[self.updateInterval])
     else:
       # both Bounds.LOCAL_MAX and Bounds.TIGHT use local maxima
       if graphCol < 2:
         # nothing being displayed
         primaryMaxBound, secondaryMaxBound = 0, 0
       else:
         primaryMaxBound = int(max(param.primaryCounts[self.updateInterval][1:graphCol + 1]))
         secondaryMaxBound = int(max(param.secondaryCounts[self.updateInterval][1:graphCol + 1]))
     
     primaryMinBound = secondaryMinBound = 0
     if self.bounds == Bounds.TIGHT:
       primaryMinBound = int(min(param.primaryCounts[self.updateInterval][1:graphCol + 1]))
       secondaryMinBound = int(min(param.secondaryCounts[self.updateInterval][1:graphCol + 1]))
       
       # if the max = min (ie, all values are the same) then use zero lower
       # bound so a graph is still displayed
       if primaryMinBound == primaryMaxBound: primaryMinBound = 0
       if secondaryMinBound == secondaryMaxBound: secondaryMinBound = 0
     
     # displays upper and lower bounds
     self.addstr(2, 0, "%4i" % primaryMaxBound, primaryColor)
     self.addstr(self.graphHeight + 1, 0, "%4i" % primaryMinBound, primaryColor)
     
     self.addstr(2, graphCol + 5, "%4i" % secondaryMaxBound, secondaryColor)
     self.addstr(self.graphHeight + 1, graphCol + 5, "%4i" % secondaryMinBound, secondaryColor)
     
     # displays intermediate bounds on every other row
     if CONFIG["features.graph.showIntermediateBounds"]:
       ticks = (self.graphHeight - 3) / 2
       for i in range(ticks):
         row = self.graphHeight - (2 * i) - 3
         if self.graphHeight % 2 == 0 and i >= (ticks / 2): row -= 1
         
         if primaryMinBound != primaryMaxBound:
           primaryVal = (primaryMaxBound - primaryMinBound) * (self.graphHeight - row - 1) / (self.graphHeight - 1)
           if not primaryVal in (primaryMinBound, primaryMaxBound): self.addstr(row + 2, 0, "%4i" % primaryVal, primaryColor)
         
         if secondaryMinBound != secondaryMaxBound:
           secondaryVal = (secondaryMaxBound - secondaryMinBound) * (self.graphHeight - row - 1) / (self.graphHeight - 1)
           if not secondaryVal in (secondaryMinBound, secondaryMaxBound): self.addstr(row + 2, graphCol + 5, "%4i" % secondaryVal, secondaryColor)
     
     # creates bar graph (both primary and secondary); bar height is the
     # count's share of the (max - min) range, scaled to the graph height
     for col in range(graphCol):
       colCount = int(param.primaryCounts[self.updateInterval][col + 1]) - primaryMinBound
       colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, primaryMaxBound) - primaryMinBound))
       for row in range(colHeight): self.addstr(self.graphHeight + 1 - row, col + 5, " ", curses.A_STANDOUT | primaryColor)
       
       colCount = int(param.secondaryCounts[self.updateInterval][col + 1]) - secondaryMinBound
       colHeight = min(self.graphHeight, self.graphHeight * colCount / (max(1, secondaryMaxBound) - secondaryMinBound))
       for row in range(colHeight): self.addstr(self.graphHeight + 1 - row, col + graphCol + 10, " ", curses.A_STANDOUT | secondaryColor)
     
     # bottom labeling of x-axis
     intervalSec = 1 # seconds per labeling
     for i in range(len(UPDATE_INTERVALS)):
       if i == self.updateInterval: intervalSec = UPDATE_INTERVALS[i][1]
     
     intervalSpacing = 10 if graphCol >= WIDE_LABELING_GRAPH_COL else 5
     unitsLabel, decimalPrecision = None, 0
     for i in range((graphCol - 4) / intervalSpacing):
       loc = (i + 1) * intervalSpacing
       timeLabel = str_tools.get_time_label(loc * intervalSec, decimalPrecision)
       
       if not unitsLabel: unitsLabel = timeLabel[-1]
       elif unitsLabel != timeLabel[-1]:
         # upped scale so also up precision of future measurements
         unitsLabel = timeLabel[-1]
         decimalPrecision += 1
       else:
         # if constrained on space then strips labeling since already provided
         timeLabel = timeLabel[:-1]
       
       self.addstr(self.graphHeight + 2, 4 + loc, timeLabel, primaryColor)
       self.addstr(self.graphHeight + 2, graphCol + 10 + loc, timeLabel, secondaryColor)
       
     param.draw(self, width, height) # allows current stats to modify the display