def run(self):
    """
    Keeps stats updated, checking for new information at a set rate.

    Runs until self._halt is set. While paused, disconnected from tor, or
    within a second of the last draw it idles on the condition variable;
    otherwise it refreshes the volatile stats (when a new resource sample is
    available or twenty seconds have passed) and redraws the panel.
    """

    lastDraw = time.time() - 1

    while not self._halt:
        currentTime = time.time()

        if self.isPaused() or currentTime - lastDraw < 1 or not self._isTorConnected:
            self._cond.acquire()
            if not self._halt:
                self._cond.wait(0.2)
            self._cond.release()
        else:
            # Update the volatile attributes (cpu, memory, flags, etc) if we have
            # a new resource usage sampling (the most dynamic stat) or its been
            # twenty seconds since last fetched (so we still refresh occasionally
            # when resource fetches fail).
            #
            # Otherwise, just redraw the panel to change the uptime field.

            isChanged = False
            if self.vals["tor/pid"]:
                resourceTracker = sysTools.getResourceTracker(self.vals["tor/pid"])
                isChanged = self._lastResourceFetch != resourceTracker.getRunCount()

            if isChanged or currentTime - self._lastUpdate >= 20:
                self._update()

            self.redraw(True)

            # Anchor the next draw to the current time. The previous
            # "lastDraw += 1" let lastDraw drift behind wall-clock time after a
            # pause or a slow redraw, producing a burst of back-to-back redraws
            # (or a busy loop) until it caught up.
            lastDraw = currentTime
def run(self):
    """
    Keeps stats updated, checking for new information at a set rate.

    Loop terminates when self._halt becomes True. When paused, tor is
    disconnected, or less than a second has elapsed since the last draw, it
    waits briefly on the condition variable. Otherwise it updates volatile
    stats (on a fresh resource sample, or every twenty seconds regardless so
    failed fetches still get retried) and redraws the panel.
    """

    lastDraw = time.time() - 1

    while not self._halt:
        currentTime = time.time()

        if self.isPaused() or currentTime - lastDraw < 1 or not self._isTorConnected:
            self._cond.acquire()
            if not self._halt:
                self._cond.wait(0.2)
            self._cond.release()
        else:
            # Update the volatile attributes (cpu, memory, flags, etc) if we have
            # a new resource usage sampling (the most dynamic stat) or its been
            # twenty seconds since last fetched (so we still refresh occasionally
            # when resource fetches fail).
            #
            # Otherwise, just redraw the panel to change the uptime field.

            isChanged = False
            if self.vals["tor/pid"]:
                resourceTracker = sysTools.getResourceTracker(self.vals["tor/pid"])
                isChanged = self._lastResourceFetch != resourceTracker.getRunCount()

            if isChanged or currentTime - self._lastUpdate >= 20:
                self._update()

            self.redraw(True)

            # Anchor the next draw to the current time. The previous
            # "lastDraw += 1" let lastDraw drift behind wall-clock time after a
            # pause or a slow redraw, producing a burst of back-to-back redraws
            # (or a busy loop) until it caught up.
            lastDraw = currentTime
def eventTick(self):
    """
    Fetch the cached measurement of resource usage from the ResourceTracker.
    """

    cpuSample, memSample = 0, 0

    if self.queryPid:
        tracker = sysTools.getResourceTracker(self.queryPid, True)

        if tracker and not tracker.lastQueryFailed():
            cpuSample, _, memSample, _ = tracker.getResourceUsage()
            cpuSample *= 100      # decimal percentage to whole numbers
            memSample /= 1048576  # translate size to MB so axis labels are short

    self._processEvent(cpuSample, memSample)
def _update(self, setStatic=False):
    """
    Updates stats in the vals mapping. By default this just revises volatile
    attributes.

    Arguments:
      setStatic - resets all parameters, including relatively static values
    """

    self.valsLock.acquire()

    # The lock must be released even if a controller query raises; otherwise
    # every later update deadlocks on valsLock.
    try:
        conn = torTools.getConn()

        if setStatic:
            # version is truncated to first part, for instance:
            # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
            self.vals["tor/version"] = conn.getInfo("version", "Unknown").split()[0]
            self.vals["tor/versionStatus"] = conn.getInfo("status/version/current", "Unknown")
            self.vals["tor/nickname"] = conn.getOption("Nickname", "")
            self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
            self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
            self.vals["tor/controlPort"] = conn.getOption("ControlPort", "0")
            self.vals["tor/socketPath"] = conn.getOption("ControlSocket", "")
            self.vals["tor/isAuthPassword"] = conn.getOption("HashedControlPassword", None) is not None
            self.vals["tor/isAuthCookie"] = conn.getOption("CookieAuthentication", None) == "1"

            # orport is reported as zero if unset
            if self.vals["tor/orPort"] == "0":
                self.vals["tor/orPort"] = ""

            # overwrite address if ORListenAddress is set (and possibly orPort too)
            self.vals["tor/orListenAddr"] = ""
            listenAddr = conn.getOption("ORListenAddress", None)

            if listenAddr:
                if ":" in listenAddr:
                    # both ip and port overwritten
                    self.vals["tor/orListenAddr"] = listenAddr[:listenAddr.find(":")]
                    self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") + 1:]
                else:
                    self.vals["tor/orListenAddr"] = listenAddr

            # fetch exit policy (might span over multiple lines)
            policyEntries = []
            for exitPolicy in conn.getOption("ExitPolicy", [], True):
                policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
            self.vals["tor/exitPolicy"] = ", ".join(policyEntries)

            # file descriptor limit for the process, if this can't be determined
            # then the limit is None
            fdLimit, fdIsEstimate = conn.getMyFileDescriptorLimit()
            self.vals["tor/fdLimit"] = fdLimit
            self.vals["tor/isFdLimitEstimate"] = fdIsEstimate

            # system information
            unameVals = os.uname()
            self.vals["sys/hostname"] = unameVals[1]
            self.vals["sys/os"] = unameVals[0]
            self.vals["sys/version"] = unameVals[2]

            pid = conn.getMyPid()
            self.vals["tor/pid"] = pid if pid else ""

            startTime = conn.getStartTime()
            self.vals["tor/startTime"] = startTime if startTime else ""

            # reverts volatile parameters to defaults
            self.vals["tor/fingerprint"] = "Unknown"
            self.vals["tor/flags"] = []
            self.vals["tor/fdUsed"] = 0
            self.vals["stat/%torCpu"] = "0"
            self.vals["stat/%armCpu"] = "0"
            self.vals["stat/rss"] = "0"
            self.vals["stat/%mem"] = "0"

        # sets volatile parameters
        # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
        # events. Introduce caching via torTools?
        self.vals["tor/address"] = conn.getInfo("address", "")

        self.vals["tor/fingerprint"] = conn.getInfo("fingerprint", self.vals["tor/fingerprint"])
        self.vals["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])

        # Updates file descriptor usage and logs if the usage is high. If we don't
        # have a known limit or it's obviously faulty (being lower than our
        # current usage) then omit file descriptor functionality.
        if self.vals["tor/fdLimit"]:
            fdUsed = conn.getMyFileDescriptorUsage()

            if fdUsed and fdUsed <= self.vals["tor/fdLimit"]:
                self.vals["tor/fdUsed"] = fdUsed
            else:
                self.vals["tor/fdUsed"] = 0

        if self.vals["tor/fdUsed"] and self.vals["tor/fdLimit"]:
            fdPercent = 100 * self.vals["tor/fdUsed"] / self.vals["tor/fdLimit"]
            estimatedLabel = " estimated" if self.vals["tor/isFdLimitEstimate"] else ""
            msg = "Tor's%s file descriptor usage is at %i%%." % (estimatedLabel, fdPercent)

            # warn once at the 60% and 90% thresholds, escalating severity
            if fdPercent >= 90 and not self._isFdNinetyPercentWarned:
                self._isFdSixtyPercentWarned, self._isFdNinetyPercentWarned = True, True
                msg += " If you run out Tor will be unable to continue functioning."
                log.warn(msg)
            elif fdPercent >= 60 and not self._isFdSixtyPercentWarned:
                self._isFdSixtyPercentWarned = True
                log.notice(msg)

        # ps or proc derived resource usage stats
        if self.vals["tor/pid"]:
            resourceTracker = sysTools.getResourceTracker(self.vals["tor/pid"])

            if resourceTracker.lastQueryFailed():
                self.vals["stat/%torCpu"] = "0"
                self.vals["stat/rss"] = "0"
                self.vals["stat/%mem"] = "0"
            else:
                cpuUsage, _, memUsage, memUsagePercent = resourceTracker.getResourceUsage()
                self._lastResourceFetch = resourceTracker.getRunCount()
                self.vals["stat/%torCpu"] = "%0.1f" % (100 * cpuUsage)
                self.vals["stat/rss"] = str(memUsage)
                self.vals["stat/%mem"] = "%0.1f" % (100 * memUsagePercent)

        # determines the cpu time for the arm process (including user and system
        # time of both the primary and child processes)
        totalArmCpuTime, currentTime = sum(os.times()[:3]), time.time()
        armCpuDelta = totalArmCpuTime - self._armCpuSampling[0]
        armTimeDelta = currentTime - self._armCpuSampling[1]
        pythonCpuTime = armCpuDelta / armTimeDelta
        sysCallCpuTime = sysTools.getSysCpuUsage()
        self.vals["stat/%armCpu"] = "%0.1f" % (100 * (pythonCpuTime + sysCallCpuTime))
        self._armCpuSampling = (totalArmCpuTime, currentTime)

        self._lastUpdate = currentTime
    finally:
        self.valsLock.release()
def _update(self, setStatic=False):
    """
    Updates stats in the vals mapping. By default this just revises volatile
    attributes.

    Arguments:
      setStatic - resets all parameters, including relatively static values
    """

    self.valsLock.acquire()

    # Release the lock even if a controller query raises, so later updates
    # don't deadlock on valsLock.
    try:
        conn = torTools.getConn()

        if setStatic:
            # version is truncated to first part, for instance:
            # 0.2.2.13-alpha (git-feb8c1b5f67f2c6f) -> 0.2.2.13-alpha
            self.vals["tor/version"] = conn.getInfo("version", "Unknown").split()[0]
            self.vals["tor/versionStatus"] = conn.getInfo("status/version/current", "Unknown")
            self.vals["tor/nickname"] = conn.getOption("Nickname", "")
            self.vals["tor/orPort"] = conn.getOption("ORPort", "0")
            self.vals["tor/dirPort"] = conn.getOption("DirPort", "0")
            self.vals["tor/controlPort"] = conn.getOption("ControlPort", "0")
            self.vals["tor/socketPath"] = conn.getOption("ControlSocket", "")
            self.vals["tor/isAuthPassword"] = conn.getOption("HashedControlPassword") is not None
            self.vals["tor/isAuthCookie"] = conn.getOption("CookieAuthentication") == "1"

            # orport is reported as zero if unset
            if self.vals["tor/orPort"] == "0":
                self.vals["tor/orPort"] = ""

            # overwrite address if ORListenAddress is set (and possibly orPort too)
            self.vals["tor/orListenAddr"] = ""
            listenAddr = conn.getOption("ORListenAddress")

            if listenAddr:
                if ":" in listenAddr:
                    # both ip and port overwritten
                    self.vals["tor/orListenAddr"] = listenAddr[:listenAddr.find(":")]
                    self.vals["tor/orPort"] = listenAddr[listenAddr.find(":") + 1:]
                else:
                    self.vals["tor/orListenAddr"] = listenAddr

            # fetch exit policy (might span over multiple lines)
            policyEntries = []
            for exitPolicy in conn.getOption("ExitPolicy", [], True):
                policyEntries += [policy.strip() for policy in exitPolicy.split(",")]
            self.vals["tor/exitPolicy"] = ", ".join(policyEntries)

            # file descriptor limit for the process, if this can't be determined
            # then the limit is None
            fdLimit, fdIsEstimate = conn.getMyFileDescriptorLimit()
            self.vals["tor/fdLimit"] = fdLimit
            self.vals["tor/isFdLimitEstimate"] = fdIsEstimate

            # system information
            unameVals = os.uname()
            self.vals["sys/hostname"] = unameVals[1]
            self.vals["sys/os"] = unameVals[0]
            self.vals["sys/version"] = unameVals[2]

            pid = conn.getMyPid()
            self.vals["tor/pid"] = pid if pid else ""

            startTime = conn.getStartTime()
            self.vals["tor/startTime"] = startTime if startTime else ""

            # reverts volatile parameters to defaults
            self.vals["tor/fingerprint"] = "Unknown"
            self.vals["tor/flags"] = []
            self.vals["tor/fdUsed"] = 0
            self.vals["stat/%torCpu"] = "0"
            self.vals["stat/%armCpu"] = "0"
            self.vals["stat/rss"] = "0"
            self.vals["stat/%mem"] = "0"

        # sets volatile parameters
        # TODO: This can change, being reported by STATUS_SERVER -> EXTERNAL_ADDRESS
        # events. Introduce caching via torTools?
        self.vals["tor/address"] = conn.getInfo("address", "")

        self.vals["tor/fingerprint"] = conn.getInfo("fingerprint", self.vals["tor/fingerprint"])
        self.vals["tor/flags"] = conn.getMyFlags(self.vals["tor/flags"])

        # Updates file descriptor usage and logs if the usage is high. If we don't
        # have a known limit or it's obviously faulty (being lower than our
        # current usage) then omit file descriptor functionality.
        if self.vals["tor/fdLimit"]:
            fdUsed = conn.getMyFileDescriptorUsage()

            if fdUsed and fdUsed <= self.vals["tor/fdLimit"]:
                self.vals["tor/fdUsed"] = fdUsed
            else:
                self.vals["tor/fdUsed"] = 0

        if self.vals["tor/fdUsed"] and self.vals["tor/fdLimit"]:
            fdPercent = 100 * self.vals["tor/fdUsed"] / self.vals["tor/fdLimit"]
            estimatedLabel = " estimated" if self.vals["tor/isFdLimitEstimate"] else ""
            msg = "Tor's%s file descriptor usage is at %i%%." % (estimatedLabel, fdPercent)

            # warn once at the 60% and 90% thresholds, at configured runlevels
            if fdPercent >= 90 and not self._isFdNinetyPercentWarned:
                self._isFdSixtyPercentWarned, self._isFdNinetyPercentWarned = True, True
                msg += " If you run out Tor will be unable to continue functioning."
                log.log(self._config["log.fdUsageNinetyPercent"], msg)
            elif fdPercent >= 60 and not self._isFdSixtyPercentWarned:
                self._isFdSixtyPercentWarned = True
                log.log(self._config["log.fdUsageSixtyPercent"], msg)

        # ps or proc derived resource usage stats
        if self.vals["tor/pid"]:
            resourceTracker = sysTools.getResourceTracker(self.vals["tor/pid"])

            if resourceTracker.lastQueryFailed():
                self.vals["stat/%torCpu"] = "0"
                self.vals["stat/rss"] = "0"
                self.vals["stat/%mem"] = "0"
            else:
                cpuUsage, _, memUsage, memUsagePercent = resourceTracker.getResourceUsage()
                self._lastResourceFetch = resourceTracker.getRunCount()
                self.vals["stat/%torCpu"] = "%0.1f" % (100 * cpuUsage)
                self.vals["stat/rss"] = str(memUsage)
                self.vals["stat/%mem"] = "%0.1f" % (100 * memUsagePercent)

        # determines the cpu time for the arm process (including user and system
        # time of both the primary and child processes)
        totalArmCpuTime, currentTime = sum(os.times()[:3]), time.time()
        armCpuDelta = totalArmCpuTime - self._armCpuSampling[0]
        armTimeDelta = currentTime - self._armCpuSampling[1]
        pythonCpuTime = armCpuDelta / armTimeDelta
        sysCallCpuTime = sysTools.getSysCpuUsage()
        self.vals["stat/%armCpu"] = "%0.1f" % (100 * (pythonCpuTime + sysCallCpuTime))
        self._armCpuSampling = (totalArmCpuTime, currentTime)

        self._lastUpdate = currentTime
    finally:
        self.valsLock.release()