def loadConfig(config):
    # Syncs the caller's config with arm's defaults, then maps every
    # "port.label.*" entry into the PORT_USAGE lookup. An entry names either a
    # single port ("port.label.80") or an inclusive range ("port.label.80-90").
    config.update(CONFIG)
    for configKey in config.getKeys():
        # fetches any port.label.* values
        if configKey.startswith("port.label."):
            portEntry = configKey[11:]
            purpose = config.get(configKey)
            divIndex = portEntry.find("-")
            if divIndex == -1:
                # single port
                if portEntry.isdigit():
                    PORT_USAGE[portEntry] = purpose
                else:
                    msg = "Port value isn't numeric for entry: %s" % configKey
                    log.log(CONFIG["log.configEntryTypeError"], msg)
            else:
                try:
                    # range of ports (inclusive); a reversed range is treated
                    # as a parse error via the explicit ValueError below
                    minPort = int(portEntry[:divIndex])
                    maxPort = int(portEntry[divIndex + 1:])
                    if minPort > maxPort:
                        raise ValueError()
                    for port in range(minPort, maxPort + 1):
                        PORT_USAGE[str(port)] = purpose
                except ValueError:
                    msg = "Unable to parse port range for entry: %s" % configKey
                    log.log(CONFIG["log.configEntryTypeError"], msg)
def _getOption(self, param, default, fetchType, suppressExc):
    # Queries tor for a configuration option, consulting the cache first.
    # fetchType selects the result shape: "str" (single value), "list", or
    # "map" (key -> list of values).
    # NOTE(review): this excerpt appears truncated -- connLock is acquired but
    # never released here, and no value is returned on the success path.
    if not fetchType in ("str", "list", "map"):
        msg = "BUG: unrecognized fetchType in torTools._getOption (%s)" % fetchType
        log.log(log.ERR, msg)
        return default
    self.connLock.acquire()
    startTime, raisedExc, isFromCache = time.time(), None, False
    result = {} if fetchType == "map" else []
    if self.isAlive():
        if (param, fetchType) in self._cachedConf:
            # cache hit - reuse the previously fetched value
            isFromCache = True
            result = self._cachedConf[(param, fetchType)]
        else:
            try:
                if fetchType == "str":
                    getConfVal = self.conn.get_option(param)[0][1]
                    if getConfVal != None:
                        result = getConfVal
                else:
                    for key, value in self.conn.get_option(param):
                        if value != None:
                            if fetchType == "list":
                                result.append(value)
                            elif fetchType == "map":
                                # NOTE(review): result is a dict in this branch, so
                                # result.append(value) would raise AttributeError --
                                # presumably result[key].append(value) was intended.
                                if key in result:
                                    result.append(value)
                                else:
                                    result[key] = [value]
            except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed), exc:
                # a closed connection means our controller is gone
                if type(exc) == TorCtl.TorCtlClosed:
                    self.close()
                result, raisedExc = default, exc
def getCorrections(self):
    """
    Performs validation on the loaded contents and provides back the
    corrections. If validation is disabled then this won't provide any
    results.
    """
    self.valsLock.acquire()
    if not self.isLoaded():
        # nothing loaded yet, so nothing to validate
        returnVal = None
    else:
        # skip validation when disabled by config or when tor is too old to
        # support GETINFO config-text
        torVersion = torTools.getConn().getVersion()
        skipValidation = not CONFIG["features.torrc.validate"]
        skipValidation |= (torVersion is None or not torVersion.meets_requirements(stem.version.Requirement.GETINFO_CONFIG_TEXT))
        if skipValidation:
            log.log(log.INFO, "Skipping torrc validation (requires tor 0.2.2.7-alpha)")
            returnVal = {}
        else:
            # validate lazily and hand back a copy so callers can't mutate the cache
            if self.corrections == None:
                self.corrections = validate(self.contents)
            returnVal = list(self.corrections)
    self.valsLock.release()
    return returnVal
def acMain(version: int = 0):
    # Assetto Corsa entry point: initializes ACLIB, discovers every
    # 'ACLIB_<appname>.py' app module, instantiates its same-named class and
    # registers it, then initializes the shared data. Always returns 'ACLIB'.
    try:
        ACLIB.init()
        # Search for all 'ACLIB_<appname>.py' files in the apps directory.
        files_list = [str(m) for m in os.listdir(APP_DIR) if os.path.isfile(path(APP_DIR, m))]
        for file_name in files_list:
            try:
                # Filename without .py extension
                module = file_name[:file_name.rfind(".")]
                if module.find('ACLIB_') > -1:
                    # Import the app to the current program if not yet done.
                    if module not in sys.modules.keys():
                        importlib.import_module(module)
                    # Initialize the class with the constructor and store it in
                    # the app list (class name is the module name minus 'ACLIB_').
                    class_ctor = getattr(sys.modules[module], module[module.find('ACLIB_') + 6:])
                    class_obj = class_ctor(ACLIB.AC_DATA, ACLIB.AC_META)
                    ACLIB.APPS[class_obj.title] = class_obj
                    log('Init {0:s}'.format(module))
            except Exception as e:
                # one broken app shouldn't stop the rest from loading
                log('Problems while initializing {0:s}'.format(file_name))
                tb(e)
        ACLIB.AC_DATA.init()
    except Exception as e:
        tb(e)
    return 'ACLIB'
def acUpdate(delta: float): ACLIB.TIMER += delta # Update every 10 milliseconds. if ACLIB.TIMER > 0.01: ACLIB.TIMER = 0 try: ac.ext_perfBegin('ACLIB_Standalone') ACLIB.AC_DATA.update(delta) ac.ext_perfEnd('ACLIB_Standalone') # Call the update function for every app stored in the app list. for _, app in ACLIB.APPS.items(): if app.active: try: if not app.no_update: ac.ext_perfBegin(app.title) app.update(delta) ac.ext_perfEnd(app.title) except Exception as e: log('Problems while updating app "{0:s}"'.format(app.title)) tb(e) except Exception as e: tb(e)
def load(self, logFailure = False):
    """
    Loads or reloads the torrc contents, raising an IOError if there's a
    problem.

    Arguments:
      logFailure - if the torrc fails to load and we've never provided a
                   warning for this before then logs a warning
    """
    self.valsLock.acquire()
    # clears contents and caches
    self.contents, self.configLocation = None, None
    self.displayableContents = None
    self.strippedContents = None
    self.corrections = None
    # NOTE(review): this excerpt never releases valsLock on the success path
    # and the file handle leaks if readlines() raises -- presumably the
    # function continues past this chunk; confirm against the full file.
    try:
        self.configLocation = getConfigLocation()
        configFile = open(self.configLocation, "r")
        self.contents = configFile.readlines()
        configFile.close()
    except IOError, exc:
        # only warn once per session about an unreadable torrc
        if logFailure and not self.isLoadFailWarned:
            msg = "Unable to load torrc (%s)" % sysTools.getFileErrorMsg(exc)
            log.log(CONFIG["log.torrc.readFailed"], msg)
            self.isLoadFailWarned = True
        self.valsLock.release()
        raise exc
def getStrCSV(self, key, default = None, count = None):
    """
    Fetches the given key as a comma separated value, providing back a list
    with the stripped entries.

    Arguments:
      key     - config setting to be fetched
      default - value provided if no such key exists or doesn't match the count
      count   - if set, then a TypeError is logged (and default returned) if
                the number of elements doesn't match the count
    """
    confValue = self.getValue(key)
    if confValue == None:
        return default
    entries = [piece.strip() for piece in confValue.split(",")]
    # enforce the expected element count when one was requested
    if count != None and len(entries) != count:
        msg = "Config entry '%s' is expected to be %i comma separated values" % (key, count)
        if default != None and isinstance(default, (list, tuple)):
            defaultStr = ", ".join([str(i) for i in default])
            msg += ", defaulting to '%s'" % defaultStr
        log.log(CONFIG["log.configEntryTypeError"], msg)
        return default
    return entries
def load_style_from_config(obj: object, class_name: str, app_name: str = None):
    # Applies cached style properties for class_name onto obj, loading the
    # style on first use: the app-specific .ini wins over the widget .ini.
    # If style was not already loaded
    if class_name not in WidgetStyle.STYLE:
        config = None
        if app_name == 'ACLIB_' + class_name:
            log('Load style for app "{}"'.format(app_name))
        # Try to load from app style config
        if app_name:
            app_style_file = path(STYLE_DIR, app_name + '.ini')
            if os.path.isfile(app_style_file):
                config = Config(app_style_file, check_modules=True)
        # Try to load from widget style config
        if not config:
            class_style_file = path(STYLE_DIR, class_name + '.ini')
            if os.path.isfile(class_style_file):
                config = Config(class_style_file, check_modules=True)
        # Store the style (falling back to the DEFAULT section, and caching
        # every section when the config came from an app file)
        if config:
            if class_name in config.dict:
                WidgetStyle.STYLE[class_name] = config.dict[class_name]
            else:
                WidgetStyle.STYLE[class_name] = config.dict['DEFAULT']
            if app_name:
                for widget, values in config.dict.items():
                    WidgetStyle.STYLE[widget] = values
    # copy every cached style property the target object actually exposes
    if class_name in WidgetStyle.STYLE:
        for prop, val in WidgetStyle.STYLE[class_name].items():
            if hasattr(obj, prop):
                setattr(obj, prop, val)
def str_to_dict(st):
    """
    Parse a dependency-field string into a {field: match-position} mapping.
    A field with no digits defaults to position 0; otherwise its digits are
    concatenated and used as the position.

    :param st: dependency field string, comma separated for multiple fields
    :return: dict mapping each raw field string to its parsed position
    """
    d = {}
    fields = st.split(',') if ',' in st else [st]
    for field in fields:
        digits = re.findall(r'\d', field)
        if not digits:
            d[field] = 0
        else:
            # concatenate all digits found in the field (e.g. 'a12' -> 12)
            d[field] = int(''.join(digits))
    log().info('字段解析中 %s', d)
    return d
def getValue(self, key, default=None, multiple=False):
    """
    Provides the currently loaded value associated with a given key. If no
    such key exists then this provides the default.

    Arguments:
      key      - config setting to be fetched
      default  - value provided if no such key exists
      multiple - provides back a list of all values if true, otherwise this
                 returns the last loaded configuration value
    """
    self.contentsLock.acquire()
    if key not in self.contents:
        msg = "config entry '%s' not found, defaulting to '%s'" % (key, str(default))
        log.log(CONFIG["log.configEntryNotFound"], msg)
        val = default
    else:
        loaded = self.contents[key]
        val = loaded if multiple else loaded[-1]
        # remember which keys were actually requested
        self.requestedKeys.add(key)
    self.contentsLock.release()
    return val
def __init__(self, stdscr, loggedEvents, config=None):
    # Log panel constructor: sets up the curses panel plus its worker thread,
    # loads the msg.* strings, and compiles the user's regex filters.
    panel.Panel.__init__(self, stdscr, "log", 0)
    threading.Thread.__init__(self)
    self.setDaemon(True)
    # Make sure that the msg.* messages are loaded. Lazy loading it later is
    # fine, but this way we're sure it happens before warning about unused
    # config options.
    loadLogMessages()
    # regex filters the user has defined
    self.filterOptions = []
    self._config = dict(DEFAULT_CONFIG)
    if config:
        # the dict argument supplies minimum values for the numeric options
        config.update(self._config, {
            "features.log.maxLinesPerEntry": 1,
            "features.log.prepopulateReadLimit": 0,
            "features.log.maxRefreshRate": 10,
            "cache.logPanel.size": 1000})
        for filter in self._config["features.log.regex"]:
            # checks if we can't have more filters
            if len(self.filterOptions) >= MAX_REGEX_FILTERS:
                break
            try:
                # only accept patterns that actually compile
                re.compile(filter)
                self.filterOptions.append(filter)
            except re.error, exc:
                msg = "Invalid regular expression pattern (%s): %s" % (exc, filter)
                log.log(self._config["log.configEntryTypeError"], msg)
def load(self, logFailure=False):
    """
    Loads or reloads the torrc contents, raising an IOError if there's a
    problem.

    Arguments:
      logFailure - if the torrc fails to load and we've never provided a
                   warning for this before then logs a warning
    """
    self.valsLock.acquire()
    # clears contents and caches
    self.contents, self.configLocation = None, None
    self.displayableContents = None
    self.strippedContents = None
    self.corrections = None
    # NOTE(review): in this excerpt valsLock isn't released on the success
    # path and the file handle leaks if readlines() raises -- presumably the
    # function continues past this chunk; confirm against the full file.
    try:
        self.configLocation = getConfigLocation()
        configFile = open(self.configLocation, "r")
        self.contents = configFile.readlines()
        configFile.close()
    except IOError, exc:
        # only warn once per session about an unreadable torrc
        if logFailure and not self.isLoadFailWarned:
            msg = "Unable to load torrc (%s)" % sysTools.getFileErrorMsg(exc)
            log.log(CONFIG["log.torrc.readFailed"], msg)
            self.isLoadFailWarned = True
        self.valsLock.release()
        raise exc
def _initColors():
    """
    Initializes color mappings usable by curses. This can only be done after
    calling curses.initscr().
    """
    global COLOR_ATTR_INITIALIZED, COLOR_IS_SUPPORTED
    if not COLOR_ATTR_INITIALIZED:
        # run-once guard; the module-level flags record the outcome
        COLOR_ATTR_INITIALIZED = True
        COLOR_IS_SUPPORTED = False
        if not CONFIG["features.colorInterface"]:
            return
        try:
            COLOR_IS_SUPPORTED = curses.has_colors()
        except curses.error:
            return  # initscr hasn't been called yet
        # initializes color mappings if color support is available
        if COLOR_IS_SUPPORTED:
            colorpair = 0
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
            for colorName in COLOR_LIST:
                fgColor = COLOR_LIST[colorName]
                bgColor = -1  # allows for default (possibly transparent) background
                colorpair += 1
                curses.init_pair(colorpair, fgColor, bgColor)
                COLOR_ATTR[colorName] = curses.color_pair(colorpair)
        else:
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
def _initColors():
    """
    Initializes color mappings usable by curses. This can only be done after
    calling curses.initscr().
    """
    global COLOR_ATTR_INITIALIZED
    if not COLOR_ATTR_INITIALIZED:
        # run-once guard; unlike the sibling variant this keeps the support
        # flag local rather than in a module-level global
        COLOR_ATTR_INITIALIZED = True
        if not CONFIG["features.colorInterface"]:
            return
        try:
            hasColorSupport = curses.has_colors()
        except curses.error:
            return  # initscr hasn't been called yet
        # initializes color mappings if color support is available
        if hasColorSupport:
            colorpair = 0
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")
            for colorName in COLOR_LIST:
                fgColor = COLOR_LIST[colorName]
                bgColor = -1  # allows for default (possibly transparent) background
                colorpair += 1
                curses.init_pair(colorpair, fgColor, bgColor)
                COLOR_ATTR[colorName] = curses.color_pair(colorpair)
        else:
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
def _workerLoop(self):
    """
    Simple producer-consumer loop followed by worker threads. This takes
    addresses from the unresolvedQueue, attempts to look up its hostname, and
    adds its results or the error to the resolved cache. Resolver reference
    provides shared resources used by the thread pool.
    """
    while not self.halt:
        # if resolver is paused then put a hold on further resolutions
        if self.isPaused:
            self.cond.acquire()
            if not self.halt:
                self.cond.wait(1)
            self.cond.release()
            continue
        # snags next available ip, timeout is because queue can't be woken up
        # when 'halt' is set
        try:
            ipAddr = self.unresolvedQueue.get_nowait()
        except Queue.Empty:
            # no elements ready, wait a little while and try again
            self.cond.acquire()
            if not self.halt:
                self.cond.wait(1)
            self.cond.release()
            continue
        if self.halt:
            break
        # failures are cached too, as the exception instance itself
        try:
            if self.useSocketResolution:
                result = _resolveViaSocket(ipAddr)
            else:
                result = _resolveViaHost(ipAddr)
        except IOError, exc:
            result = exc  # lookup failed
        except ValueError, exc:
            result = exc  # dns error
        self.resolvedLock.acquire()
        self.resolvedCache[ipAddr] = (result, RESOLVER_COUNTER.next())
        # trim cache if excessively large (clearing out oldest entries)
        if len(self.resolvedCache) > CONFIG["cache.hostnames.size"]:
            # Providing for concurrent, non-blocking calls require that entries are
            # never removed from the cache, so this creates a new, trimmed version
            # instead.
            # determines minimum age of entries to be kept
            currentCount = RESOLVER_COUNTER.next()
            newCacheSize = CONFIG["cache.hostnames.size"] - CONFIG["cache.hostnames.trimSize"]
            threshold = currentCount - newCacheSize
            newCache = {}
            msg = "trimming hostname cache from %i entries to %i" % (len(self.resolvedCache), newCacheSize)
            log.log(CONFIG["log.hostnameCacheTrimmed"], msg)
            # checks age of each entry, adding to toDelete if too old
            for ipAddr, entry in self.resolvedCache.iteritems():
                if entry[1] >= threshold:
                    newCache[ipAddr] = entry
            self.resolvedCache = newCache
        self.resolvedLock.release()
def get_data(self, id):
    """
    Look up the value stored under the given key.

    :param id: key into self.data
    :return: the stored (truthy) value, or None when missing/empty
    """
    # dict.get avoids the KeyError that self.data[id] raised for unknown keys;
    # a falsy value is still treated as "not found", matching the old branch
    token = self.data.get(id)
    if token:
        return token
    log().error('获取token失败')
    return None
def loadConfig(config):
    """Syncs the color config with the defaults and applies any override."""
    config.update(CONFIG)
    CONFIG["features.colorOverride"] = "none"
    colorOverride = config.get("features.colorOverride", "none")
    # guard clause: nothing to apply when no override is configured
    if colorOverride == "none":
        return
    try:
        setColorOverride(colorOverride)
    except ValueError as exc:
        log.log(CONFIG["log.configEntryTypeError"], exc)
def get_request_url(self, row):
    """
    Fetch the request url cell from the given excel row.

    :param row: row number
    :return: the url value
    """
    # look up which column holds the url, then read that cell
    url_column = int(data_config.get_url())
    url = self.opera_excel.get_cell_value(row, url_column)
    log().info('获取请求地址\n%s', url)
    return url
def get_request_method(self, row):
    """
    Fetch the request method (e.g. GET/POST) from the given excel row.

    :param row: row number
    :return: the request method value
    """
    method_column = int(data_config.get_run_way())  # column holding the method
    request_method = self.opera_excel.get_cell_value(row, method_column)
    log().info('获取请求方式 %s', request_method)
    return request_method
def isExitAllowed(ip, port, exitPolicy, isPrivateRejected):
    """
    Determines if a given connection is a permissable exit with the given
    policy or not (True if it's allowed to be an exit connection, False
    otherwise).

    NOTE: this is a little tricky and liable to need some tweaks
    """
    # might not be set when first starting up
    if not exitPolicy:
        return True

    # TODO: move into a utility and craft some unit tests (this is very error
    # prone...)
    # TODO: currently doesn't consider ExitPolicyRejectPrivate (which prevents
    # connections to private networks and local ip)
    for rawEntry in exitPolicy.split(","):
        rule = rawEntry.strip()
        isAccept = rule.startswith("accept")
        rule = rule[7:]  # strips off "accept " or "reject "

        # parses ip address (with mask if provided) and port
        if ":" in rule:
            ruleIP = rule[:rule.find(":")]
            rulePort = rule[rule.find(":") + 1:]
        else:
            ruleIP, rulePort = rule, "*"

        ipMatches = ruleIP == ip or ruleIP[0] == "*"

        if "-" not in rulePort:
            # single port
            portMatches = rulePort == str(port) or rulePort[0] == "*"
        else:
            # inclusive port range
            lowPort = int(rulePort[:rulePort.find("-")])
            highPort = int(rulePort[rulePort.find("-") + 1:])
            portMatches = lowPort <= port <= highPort

        # Subnet masks and the 'private' keyword are treated as wildcards for
        # reject rules and as non-matches for accept rules (conservative with
        # acceptance).
        if not isAccept:
            ipMatches |= "/" in ruleIP or ruleIP == "private"

        if ipMatches and portMatches:
            return isAccept

    # we shouldn't ever fall through due to default exit policy
    log.log(log.WARN, "Exit policy left connection uncategorized: %s:%i" % (ip, port))
    return False
def __init__(self, processName, processPid = "", resolveRate = None, handle = None):
    """
    Initializes a new resolver daemon. When no longer needed it's suggested
    that this is stopped.

    Arguments:
      processName - name of the process being resolved
      processPid  - pid of the process being resolved
      resolveRate - time between resolving connections (in seconds, None if
                    chosen dynamically)
      handle      - name used to query this resolver, this is the processName
                    if undefined
    """
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.processName = processName
    self.processPid = processPid
    self.resolveRate = resolveRate
    self.handle = handle if handle else processName
    self.defaultRate = CONFIG["queries.connections.minRate"]
    self.lastLookup = -1
    self.overwriteResolver = None
    self.defaultResolver = Resolver.PROC
    osType = os.uname()[0]
    self.resolverOptions = getSystemResolvers(osType)
    log.log(CONFIG["log.connResolverOptions"], "Operating System: %s, Connection Resolvers: %s" % (osType, ", ".join(self.resolverOptions)))
    # sets the default resolver to be the first found in the system's PATH
    # (left as netstat if none are found)
    for resolver in self.resolverOptions:
        # Resolver strings correspond to their command with the exception of bsd
        # resolvers.
        resolverCmd = resolver.replace(" (bsd)", "")
        if resolver == Resolver.PROC or sysTools.isAvailable(resolverCmd):
            self.defaultResolver = resolver
            break
    self._connections = []  # connection cache (latest results)
    self._resolutionCounter = 0  # number of successful connection resolutions
    self._isPaused = False
    self._halt = False  # terminates thread if true
    self._cond = threading.Condition()  # used for pausing the thread
    self._subsiquentFailures = 0  # number of failed resolutions with the default in a row
    self._resolverBlacklist = []  # resolvers that have failed to resolve
    # Number of sequential times the threshold rate's been too low. This is to
    # avoid having stray spikes up the rate.
    self._rateThresholdBroken = 0
def __init__(self, ball: Ball, map: Map, player1: Player, player2: Player, speed=50):
    """Store the playing field objects and the simulation speed."""
    self.speed = speed
    log("creating model")
    # field objects the model simulates
    self.ball = ball
    self.map = map
    self.player1 = player1
    self.player2 = player2
def eventTick(self):
    """
    Processes a ps event.
    """
    psResults = {}  # mapping of stat names to their results
    if self.queryPid and self.queryParam and self.failedCount < FAILURE_THRESHOLD:
        queryCmd = "ps -p %s -o %s" % (self.queryPid, ",".join(self.queryParam))
        psCall = sysTools.call(queryCmd, self.cacheTime, True)
        if psCall and len(psCall) == 2:
            # ps provided results (first line is headers, second is stats)
            stats = psCall[1].strip().split()
            if len(self.queryParam) == len(stats):
                # we have a result to match each stat - constructs mapping
                psResults = dict([(self.queryParam[i], stats[i]) for i in range(len(stats))])
                self.failedCount = 0  # had a successful call - reset failure count
    if not psResults:
        # ps call failed, if we fail too many times sequentially then abandon
        # listing (probably due to invalid ps parameters)
        # NOTE(review): queryCmd is unbound here if queryPid/queryParam were
        # unset -- the threshold message would raise NameError in that case;
        # confirm whether that path is reachable.
        self.failedCount += 1
        if self.failedCount == FAILURE_THRESHOLD:
            msg = "failed several attempts to query '%s', abandoning ps graph" % queryCmd
            log.log(self._config["log.graph.ps.abandon"], msg)
    # if something fails (no pid, ps call failed, etc) then uses last results
    primary, secondary = self.lastPrimary, self.lastSecondary
    for isPrimary in (True, False):
        if isPrimary:
            statName = self._config["features.graph.ps.primaryStat"]
        else:
            statName = self._config["features.graph.ps.secondaryStat"]
        if statName in psResults:
            try:
                result = float(psResults[statName])
                # The 'rss' and 'size' parameters provide memory usage in KB. This is
                # scaled up to MB so the graph's y-high is a reasonable value.
                if statName in ("rss", "size"):
                    result /= 1024.0
                if isPrimary:
                    primary = result
                else:
                    secondary = result
            except ValueError:
                if self.queryParam != HEADER_PS_PARAM:
                    # custom stat provides non-numeric results - give a warning and stop querying it
                    msg = "unable to use non-numeric ps stat '%s' for graphing" % statName
                    log.log(self._config["log.graph.ps.invalidStat"], msg)
                    self.queryParam.remove(statName)
    self._processEvent(primary, secondary)
def handleKey(self, key):
    # Panel keystroke handler: 'n' sends a NEWNYM signal, 'r' attempts to
    # reconnect to tor (socket first, then the control port) when down.
    isKeystrokeConsumed = True
    if key in (ord('n'), ord('N')) and torTools.getConn().isNewnymAvailable():
        self.sendNewnym()
    elif key in (ord('r'), ord('R')) and not self._isTorConnected:
        torctlConn = None
        allowPortConnection, allowSocketConnection, _ = starter.allowConnectionTypes()
        if os.path.exists(self._config["startup.interface.socket"]) and allowSocketConnection:
            try:
                torctlConn = torTools.connect_socket(self._config["startup.interface.socket"])
            except IOError, exc:
                if not allowPortConnection:
                    cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3)
        elif not allowPortConnection:
            cli.popups.showMsg("Unable to reconnect (socket '%s' doesn't exist)" % self._config["startup.interface.socket"], 3)
        if not torctlConn and allowPortConnection:
            try:
                ctlAddr, ctlPort = self._config["startup.interface.ipAddress"], self._config["startup.interface.port"]
                tmpConn, authType, authValue = TorCtl.TorCtl.preauth_connect(ctlAddr, ctlPort)
                if authType == TorCtl.TorCtl.AUTH_TYPE.PASSWORD:
                    # NOTE(review): the span after the prompt string was redacted
                    # ("******") in this copy -- presumably it prompted for the
                    # password, authenticated, then logged the reconnection.
                    authValue = cli.popups.inputPrompt("Controller Password: "******"Reconnected to Tor's control port")
                cli.popups.showMsg("Tor reconnected", 1)
            except:
                # displays notice for the first failed connection attempt
                if exc.args:
                    cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3)
def __init__(self, processName, processPid="", resolveRate=None):
    """
    Initializes a new resolver daemon. When no longer needed it's suggested
    that this is stopped.

    Arguments:
      processName - name of the process being resolved
      processPid  - pid of the process being resolved
      resolveRate - time between resolving connections (in seconds, None if
                    chosen dynamically)
    """
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.processName = processName
    self.processPid = processPid
    self.resolveRate = resolveRate
    self.defaultRate = CONFIG["queries.connections.minRate"]
    self.lastLookup = -1
    self.overwriteResolver = None
    self.defaultResolver = CMD_NETSTAT
    osType = os.uname()[0]
    self.resolverOptions = getSystemResolvers(osType)
    resolverLabels = ", ".join([CMD_STR[option] for option in self.resolverOptions])
    log.log(CONFIG["log.connResolverOptions"], "Operating System: %s, Connection Resolvers: %s" % (osType, resolverLabels))
    # sets the default resolver to be the first found in the system's PATH
    # (left as netstat if none are found)
    for resolver in self.resolverOptions:
        if sysTools.isAvailable(CMD_STR[resolver]):
            self.defaultResolver = resolver
            break
    self._connections = []  # connection cache (latest results)
    self._isPaused = False
    self._halt = False  # terminates thread if true
    self._cond = threading.Condition()  # used for pausing the thread
    self._subsiquentFailures = 0  # number of failed resolutions with the default in a row
    self._resolverBlacklist = []  # resolvers that have failed to resolve
    # Number of sequential times the threshold rate's been too low. This is to
    # avoid having stray spikes up the rate.
    self._rateThresholdBroken = 0
def is_depend(self, row):
    """
    Check whether the given row declares a case dependency.

    :param row: row number
    :return: the depended-on case id, or None when the cell is empty
    """
    # column holding the dependency id
    depend_column = int(data_config.get_case_depend())
    depend_case_id = self.opera_excel.get_cell_value(row, depend_column)
    if depend_case_id == "":
        log().info('没有数据依赖')
        return None
    log().info('获取依赖%s', depend_case_id)
    return depend_case_id
def post_main(self, url, data=None, header=None):
    """
    Issue a POST request and decode the json response.

    MultipartEncoder payloads go through ``data=``; everything else goes
    through ``json=`` so requests serializes the body itself.

    :param url: target url
    :param data: request payload (dict or MultipartEncoder)
    :param header: optional headers dict
    :return: decoded json response, or None when the request itself failed
    """
    log().info('开始请求')
    res = None
    try:
        use_multipart = 'MultipartEncoder' in str(data)
        if header is not None:
            if use_multipart:
                res = requests.post(url=url, data=data, headers=header, verify=False)
            else:
                # payload must be passed via json= (see the requests models.py
                # note in the original: ensure_ascii=False when dumping)
                res = requests.post(url=url, json=data, headers=header, verify=False)
        else:
            if use_multipart:
                res = requests.post(url=url, data=data, verify=False)
            else:
                res = requests.post(url=url, json=data, verify=False)
        if not res:
            log().error('响应失败 %s', res)
        else:
            log().info('请求完成 %s', res)
    except Exception as e:
        log().error('请求失败\n%s', e)
    # bug fix: res stays None when the request raised; the old code then
    # crashed on res.json() with AttributeError instead of reporting failure
    if res is None:
        return None
    return res.json()
def is_token(self, row):
    """
    Check whether the row's request should carry a token header.

    :param row: row number
    :return: the token cell value, or None when the cell is empty
    """
    token_column = int(data_config.get_token())  # column holding the flag
    token = self.opera_excel.get_cell_value(row, token_column)
    if token == '':
        log().info('获取是否携带token为空')
        return None
    log().info('获取是否携带token %s', token)
    return token
def get_request_data(self, row):
    """
    Fetch the request payload cell for the given row.

    :param row: row number
    :return: the raw cell text, or None when the cell is empty
    """
    data_column = int(data_config.get_data())  # column holding the payload
    request_data = self.opera_excel.get_cell_value(row, data_column)
    if request_data == '':
        log().info('没有请求参数')
        return None
    log().info('获取请求参数')
    return request_data
def get_expcet_data(self, row, _log=True):
    """
    Fetch the expected-result cell for the given row.

    :param row: row number
    :param _log: when True, log the fetched value
    :return: the expected result, or None when the cell is empty
    """
    expect_column = int(data_config.get_expect())  # column holding the value
    expect = self.opera_excel.get_cell_value(row, expect_column)
    if expect == "":
        log().error('预期结果为空')
        return None
    if _log:
        log().info('获取预期结果 %s', expect)
    return expect
def get_response_token(self):
    """
    Build an Authorization header payload from the login response's token.

    :return: {"data": {"Authorization": "Bearer <token>"}}, or None when the
             response carries no usable token
    """
    try:
        return {
            "data": {
                "Authorization": "Bearer " + self.response['data']['token']
            }
        }
    except (KeyError, TypeError):
        # narrowed from a bare except: only a missing/odd response shape means
        # "no token"; anything else should surface instead of being swallowed
        log().error("token获取异常")
        return None
def get_headers(self, row):
    """
    Fetch the headers cell for the given row and parse it into a dict.

    :param row: row number
    :return: parsed headers dict, or {} when the cell is empty
    """
    headers_column = int(data_config.get_headers())  # column holding headers
    header = self.opera_excel.get_cell_value(row, headers_column)
    if not header:
        log().info('没有请求头')
        return {}
    parsed = header_str_dict(header)
    log().info('获取请求头\n%s', parsed)
    return parsed
def write_value(self, row, col, value):
    """
    Write a value back into the excel workbook and save the file.

    :param row: row index
    :param col: column index
    :param value: value to write into the cell
    """
    read_data = xlrd.open_workbook(self.file_name)
    # xlrd books are read-only; copy() produces a writable xlwt workbook
    write_data = copy(read_data)
    sheet_data = write_data.get_sheet(0)
    sheet_data.write(row, col, value)
    try:
        write_data.save(self.file_name)
    except Exception:
        # narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate; message fixed too (this is a write failure, not a read)
        log().error('excel写入失败')
def handleKey(self, key):
    # Panel keystroke handler: 'n' sends a NEWNYM signal, 'r' attempts to
    # reconnect to tor (unix socket first, then the control port, finally the
    # wizard-managed instance) when the connection is down.
    isKeystrokeConsumed = True
    if key in (ord('n'), ord('N')) and torTools.getConn().isNewnymAvailable():
        self.sendNewnym()
    elif key in (ord('r'), ord('R')) and not self._isTorConnected:
        torctlConn = None
        allowPortConnection, allowSocketConnection, _ = starter.allowConnectionTypes()
        if os.path.exists(self._config["startup.interface.socket"]) and allowSocketConnection:
            try:
                torctlConn = torTools.connect_socket(self._config["startup.interface.socket"])
            except IOError, exc:
                if not allowPortConnection:
                    cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3)
        elif not allowPortConnection:
            cli.popups.showMsg("Unable to reconnect (socket '%s' doesn't exist)" % self._config["startup.interface.socket"], 3)
        if not torctlConn and allowPortConnection:
            # TODO: This has diverged from starter.py's connection, for instance it
            # doesn't account for relative cookie paths or multiple authentication
            # methods. We can't use the starter.py's connection function directly
            # due to password prompts, but we could certainly make this mess more
            # manageable.
            try:
                ctlAddr, ctlPort = self._config["startup.interface.ipAddress"], self._config["startup.interface.port"]
                tmpConn, authType, authValue = TorCtl.TorCtl.preauth_connect(ctlAddr, ctlPort)
                if authType == TorCtl.TorCtl.AUTH_TYPE.PASSWORD:
                    # NOTE(review): a span was redacted here ("******") in this
                    # copy -- presumably the password prompt and, for cookie auth,
                    # reading the cookie file before the size check below.
                    authValue = cli.popups.inputPrompt("Controller Password: "******"authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (authValue, authCookieSize))
                tmpConn.authenticate(authValue)
                torctlConn = tmpConn
            except Exception, exc:
                # attempts to use the wizard port too
                try:
                    cli.controller.getController().getTorManager().connectManagedInstance()
                    log.log(log.NOTICE, "Reconnected to Tor's control port")
                    cli.popups.showMsg("Tor reconnected", 1)
                except:
                    # displays notice for the first failed connection attempt
                    if exc.args:
                        cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3)
def acShutdown():
    """Shut down shared data, every registered app, and persist the config."""
    try:
        ACLIB.AC_DATA.shutdown()
        # Shut down every app stored in the app list; one failure must not
        # block the remaining apps from shutting down.
        for app in ACLIB.APPS.values():
            try:
                log('Shutdown {0:s}'.format(app.title))
                app.shutdown()
            except Exception as e:
                log('Problems while shutting down app "{0:s}"'.format(app.title))
                tb(e)
        CONFIG.write()
        ACLIB.shutdown()
    except Exception as e:
        tb(e)
def main(wf):
    # Workflow entry point: the first argument selects the command, the rest
    # are its arguments (encoded to utf-8 for the py2 workflow libraries).
    cmd, args = wf.args[0].encode('utf-8'), wf.args[1:]
    for idx, arg in enumerate(args):
        args[idx] = arg.encode('utf-8')
    log(args)
    try:
        # dispatch to the handler registered for this command
        rst = FUNC[cmd](args, wf)
    except Exception as e:
        wf.add_item(title="Coding Tools Err: {0}".format(e), valid=False)
    else:
        # show the result directly
        if rst:
            wf.add_item(title='{}: {}'.format(cmd, args), subtitle=rst, arg=rst, valid=True)
    wf.send_feedback()
def setUp(self):
    # Per-test fixture: spins up a Chrome session and the Wanfang page object.
    self.url = "http://fz.wanfangdata.com.cn/index.do"
    self.driver = webdriver.Chrome()
    self.driver.implicitly_wait(20)  # seconds, applies to all element lookups
    self.verificationErrors = []
    self.wanfang_page = WanfangPage(self.driver)
    # self.title = u'百度一下,你就知道'
    self.mylog = log.log()
def __init__(self, processName, processPid = "", resolveRate = None):
    """
    Initializes a new resolver daemon. When no longer needed it's suggested
    that this is stopped.

    Arguments:
      processName - name of the process being resolved
      processPid  - pid of the process being resolved
      resolveRate - time between resolving connections (in seconds, None if
                    chosen dynamically)
    """
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.processName = processName
    self.processPid = processPid
    self.resolveRate = resolveRate
    self.defaultRate = CONFIG["queries.connections.minRate"]
    self.lastLookup = -1
    self.overwriteResolver = None
    self.defaultResolver = CMD_NETSTAT
    osType = os.uname()[0]
    self.resolverOptions = getSystemResolvers(osType)
    resolverLabels = ", ".join([CMD_STR[option] for option in self.resolverOptions])
    log.log(CONFIG["log.connResolverOptions"], "Operating System: %s, Connection Resolvers: %s" % (osType, resolverLabels))
    # sets the default resolver to be the first found in the system's PATH
    # (left as netstat if none are found)
    for resolver in self.resolverOptions:
        if sysTools.isAvailable(CMD_STR[resolver]):
            self.defaultResolver = resolver
            break
    self._connections = []  # connection cache (latest results)
    self._isPaused = False
    self._halt = False  # terminates thread if true
    self._cond = threading.Condition()  # used for pausing the thread
    self._subsiquentFailures = 0  # number of failed resolutions with the default in a row
    self._resolverBlacklist = []  # resolvers that have failed to resolve
    # Number of sequential times the threshold rate's been too low. This is to
    # avoid having stray spikes up the rate.
    self._rateThresholdBroken = 0
def _resetSubwindow(self):
    """
    Create a new subwindow instance for the panel if:
    - Panel currently doesn't have a subwindow (was uninitialized or
      invalidated).
    - There's room for the panel to grow vertically (curses automatically
      lets subwindows regrow horizontally, but not vertically).
    - The subwindow has been displaced. This is a curses display bug that
      manifests if the terminal's shrank then re-expanded. Displaced
      subwindows are never restored to their proper position, resulting in
      graphical glitches if we draw to them.
    - The preferred size is smaller than the actual size (should shrink).

    This returns True if a new subwindow instance was created, False
    otherwise.
    """
    newHeight, newWidth = self.getPreferredSize()
    if newHeight == 0:
        return False  # subwindow would be outside its parent

    # determines if a new subwindow should be recreated
    recreate = self.win == None
    if self.win:
        subwinMaxY, subwinMaxX = self.win.getmaxyx()
        recreate |= subwinMaxY < newHeight  # check for vertical growth
        recreate |= self.top > self.win.getparyx()[0]  # check for displacement
        recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight  # shrinking

    # I'm not sure if recreating subwindows is some sort of memory leak but the
    # Python curses bindings seem to lack all of the following:
    # - subwindow deletion (to tell curses to free the memory)
    # - subwindow moving/resizing (to restore the displaced windows)
    # so this is the only option (besides removing subwindows entirely which
    # would mean far more complicated code and no more selective refreshing)
    if recreate:
        self.win = self.parent.subwin(newHeight, newWidth, self.top, self.left)
        # note: doing this log before setting win produces an infinite loop
        msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
        log.log(CONFIG["log.panelRecreated"], msg)
    return recreate
def _initColors():
    """
    Initializes color mappings usable by curses. This can only be done after
    calling curses.initscr().

    Idempotent: guarded by COLOR_ATTR_INITIALIZED so repeated calls are no-ops.
    """
    global COLOR_ATTR_INITIALIZED, COLOR_IS_SUPPORTED
    if not COLOR_ATTR_INITIALIZED:
        # hack to replace all ACS characters with '+' if ACS support has been
        # manually disabled
        if not CONFIG["features.acsSupport"]:
            for item in curses.__dict__:
                if item.startswith("ACS_"):
                    curses.__dict__[item] = ord('+')

            # replace a few common border pipes that are better rendered as '|' or
            # '-' instead
            curses.ACS_SBSB = ord('|')
            curses.ACS_VLINE = ord('|')
            curses.ACS_BSBS = ord('-')
            curses.ACS_HLINE = ord('-')

        COLOR_ATTR_INITIALIZED = True
        COLOR_IS_SUPPORTED = False
        if not CONFIG["features.colorInterface"]: return

        try: COLOR_IS_SUPPORTED = curses.has_colors()
        except curses.error: return  # initscr hasn't been called yet

        # initializes color mappings if color support is available
        if COLOR_IS_SUPPORTED:
            colorpair = 0
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support detected and enabled")

            # registers one curses color pair per entry in COLOR_LIST
            for colorName in COLOR_LIST:
                fgColor = COLOR_LIST[colorName]
                bgColor = -1  # allows for default (possibly transparent) background
                colorpair += 1
                curses.init_pair(colorpair, fgColor, bgColor)
                COLOR_ATTR[colorName] = curses.color_pair(colorpair)
        else:
            log.log(CONFIG["log.cursesColorSupport"], "Terminal color support unavailable")
def run_model():  # ball movement (translated from Ukrainian: "рух м'ячика")
    """
    Starts the game model and then continuously mirrors its state (ball and
    paddle positions) into the UI, ticking at a rate tied to the model's speed.
    Runs forever; intended to be driven on its own thread or as the main loop.
    """
    log("start model loop")
    model.run()
    log("start getting model stats")
    while True:
        ball_x = model.ball.x
        ball_y = model.ball.y
        player1_x = model.player1.start_coor_x
        player2_x = model.player2.start_coor_x
        app.set_ball_coor(ball_x, ball_y)
        # NOTE(review): boolean flag presumably selects which player's paddle
        # to move (True -> player1) — confirm against app.set_player_coor
        app.set_player_coor(True, player1_x)
        app.set_player_coor(False, player2_x)
        # model.set_speed(model.speed + 0.5)  # disabled: gradual speed-up experiment
        time.sleep(1 / model.speed)
def isTorrcAvailable(self):
    """
    True if a wizard generated torrc exists and the user has permissions to
    run it, false otherwise.

    Fix: previously a missing torrc fell off the end of the function and
    implicitly returned None; now it explicitly returns False, matching the
    documented contract (truthiness for existing callers is unchanged).
    """
    torrcLoc = self.getTorrcPath()

    if not os.path.exists(torrcLoc):
        return False  # no wizard torrc has been generated

    if os.getuid() == 0:
        return True  # running as root, permissions aren't an issue

    # If we aren't running as root and would be trying to bind to low ports
    # then the startup will fail due to permissons. Attempts to check for
    # this in the torrc. If unable to read the torrc then we probably
    # wouldn't be able to use it anyway with our permissions.
    try:
        return not torConfig.isRootNeeded(torrcLoc)
    except IOError as exc:
        log.log(log.INFO, "Failed to read torrc at '%s': %s" % (torrcLoc, exc))
        return False
def heartbeatCheck(isUnresponsive):
    """
    Logs if its been ten seconds since the last BW event.

    Arguments:
      isUnresponsive - flag for if we've indicated to be responsive or not
    """
    conn = torTools.getConn()
    lastHeartbeat = conn.getHeartbeat()

    # the heartbeat is only meaningful while we're connected and subscribed
    # to BW events
    if not conn.isAlive() or "BW" not in conn.getControllerEvents():
        return isUnresponsive

    if not isUnresponsive and (time.time() - lastHeartbeat) >= 10:
        # flag the relay as hung and note when we last heard from it
        isUnresponsive = True
        log.log(log.NOTICE, "Relay unresponsive (last heartbeat: %s)" % time.ctime(lastHeartbeat))
    elif isUnresponsive and (time.time() - lastHeartbeat) < 10:
        # really shouldn't happen (meant Tor froze for a bit)
        isUnresponsive = False
        log.log(log.NOTICE, "Relay resumed")

    return isUnresponsive
def _resetSubwindow(self):
    """
    Create a new subwindow instance for the panel if:
    - Panel currently doesn't have a subwindow (was uninitialized or
      invalidated).
    - There's room for the panel to grow vertically (curses automatically
      lets subwindows regrow horizontally, but not vertically).
    - The subwindow has been displaced. This is a curses display bug that
      manifests if the terminal's shrank then re-expanded. Displaced
      subwindows are never restored to their proper position, resulting in
      graphical glitches if we draw to them.
    - The preferred size is smaller than the actual size (should shrink).

    This returns True if a new subwindow instance was created, False otherwise.
    """
    newHeight, newWidth = self.getPreferredSize()
    if newHeight == 0:
        return False  # subwindow would be outside its parent

    # determines if a new subwindow should be recreated
    recreate = self.win == None
    if self.win:
        subwinMaxY, subwinMaxX = self.win.getmaxyx()
        recreate |= subwinMaxY < newHeight           # check for vertical growth
        recreate |= self.top > self.win.getparyx()[0]  # check for displacement
        recreate |= subwinMaxX > newWidth or subwinMaxY > newHeight  # shrinking

    # I'm not sure if recreating subwindows is some sort of memory leak but the
    # Python curses bindings seem to lack all of the following:
    # - subwindow deletion (to tell curses to free the memory)
    # - subwindow moving/resizing (to restore the displaced windows)
    # so this is the only option (besides removing subwindows entirely which
    # would mean far more complicated code and no more selective refreshing)
    if recreate:
        # NOTE(review): the left coordinate is hard-coded to 0 here while the
        # otherwise-identical panel implementation elsewhere passes self.left —
        # confirm whether this panel is always flush to the left edge
        self.win = self.parent.subwin(newHeight, newWidth, self.top, 0)

        # note: doing this log before setting win produces an infinite loop
        msg = "recreating panel '%s' with the dimensions of %i/%i" % (self.getName(), newHeight, newWidth)
        log.log(CONFIG["log.panelRecreated"], msg)
    return recreate
def getIntCSV(self, key, default = None, count = None, minValue = None, maxValue = None):
    """
    Fetches the given comma separated value as a list of integers, logging a
    TypeError (and returning the default) if the values aren't ints or aren't
    constrained to the given bounds.

    Arguments:
      key      - config setting to be fetched
      default  - value provided if no such key exists, doesn't match the count,
                 values aren't all integers, or doesn't match the bounds
      count    - checks that the number of values matches this if set
      minValue - checks that all values are over this if set
      maxValue - checks that all values are less than this if set
    """
    csvValues = self.getStrCSV(key, default, count)
    if csvValues == default:
        return default

    # builds the message prefix used if any validation check fails
    baseErrorMsg = "Config entry '%s' is expected to %%s" % key
    if default != None and (isinstance(default, list) or isinstance(default, tuple)):
        baseErrorMsg += ", defaulting to '%s'" % ", ".join([str(i) for i in default])

    errorMsg = None
    for entry in csvValues:
        if not entry.isdigit():
            errorMsg = baseErrorMsg % "only have integer values"
            break
        if minValue != None and int(entry) < minValue:
            errorMsg = baseErrorMsg % "only have values over %i" % minValue
            break
        if maxValue != None and int(entry) > maxValue:
            errorMsg = baseErrorMsg % "only have values less than %i" % maxValue
            break

    if errorMsg:
        log.log(CONFIG["log.configEntryTypeError"], errorMsg)
        return default
    return [int(entry) for entry in csvValues]
def registerEvent(self, event):
    """
    Notes event and redraws log. If paused it's held in a temporary buffer.

    Arguments:
      event - LogEntry for the event that occurred
    """
    # silently drop events we aren't configured to display
    if not event.type in self.loggedEvents: return

    # strips control characters to avoid screwing up the terminal
    event.msg = uiTools.getPrintable(event.msg)

    # note event in the log file if we're saving them
    if self.logFile:
        try:
            self.logFile.write(event.getDisplayMessage(True) + "\n")
            self.logFile.flush()
        except IOError, exc:
            log.log(self._config["log.logPanel.logFileWriteFailed"], "Unable to write to log file: %s" % sysTools.getFileErrorMsg(exc))
            # drop the file handle so we don't retry (and re-log) on every event
            self.logFile = None
def showWizard():
    """
    Provides a series of prompts, allowing the user to spawn a customized tor
    instance.

    NOTE(review): the visible portion of this function ends after the version
    query — the actual prompt sequence presumably follows and appears to have
    been truncated in this chunk; confirm against the full source.
    """
    # bail early if tor isn't even installed
    if not sysTools.isAvailable("tor"):
        msg = "Unable to run the setup wizard. Is tor installed?"
        log.log(log.WARN, msg)
        return

    # gets tor's version
    torVersion = None
    try:
        versionQuery = sysTools.call("tor --version")

        for line in versionQuery:
            if line.startswith("Tor version "):
                torVersion = torTools.parseVersion(line.split(" ")[2])
                break
    except IOError, exc:
        log.log(log.INFO, "'tor --version' query failed: %s" % exc)
def new_desc_event(self, event):
    """
    Handles tor NEWDESC events: invalidates cached lookups for the affected
    fingerprints and refreshes their entries in fingerprintMappings from the
    current consensus.
    """
    self.orconnStatusCacheValid = False
    self._resolveFamilyEntries()

    for fingerprint in event.idlist:
        # clears entries with this fingerprint from the cache
        if fingerprint in self.fingerprintLookupCache.values():
            invalidEntries = set(k for k, v in self.fingerprintLookupCache.iteritems() if v == fingerprint)
            for k in invalidEntries:
                # nicknameLookupCache keys are a subset of fingerprintLookupCache
                del self.fingerprintLookupCache[k]
                if k in self.nicknameLookupCache.keys():
                    del self.nicknameLookupCache[k]

        # gets consensus data for the new description
        try:
            nsData = self.conn.get_network_status("id/%s" % fingerprint)
        except (socket.error, TorCtl.ErrorReply, TorCtl.TorCtlClosed):
            # controller's gone (or balked) - give up on the whole batch
            return

        if len(nsData) > 1:
            # multiple records for fingerprint (shouldn't happen)
            log.log(log.WARN, "Multiple consensus entries for fingerprint: %s" % fingerprint)
            return
        nsEntry = nsData[0]

        # updates fingerprintMappings with new data
        if nsEntry.ip in self.fingerprintMappings.keys():
            # if entry already exists with the same orport, remove it
            orportMatch = None
            for entryPort, entryFingerprint, entryNickname in self.fingerprintMappings[nsEntry.ip]:
                if entryPort == nsEntry.orport:
                    orportMatch = (entryPort, entryFingerprint, entryNickname)
                    break
            if orportMatch:
                self.fingerprintMappings[nsEntry.ip].remove(orportMatch)

            # add new entry
            self.fingerprintMappings[nsEntry.ip].append((nsEntry.orport, nsEntry.idhex, nsEntry.nickname))
        else:
            self.fingerprintMappings[nsEntry.ip] = [(nsEntry.orport, nsEntry.idhex, nsEntry.nickname)]

    # hostname ordering is unaffected by descriptor changes, so skip the re-sort
    if self.listingType != LIST_HOSTNAME:
        self.sortConnections()
def makeFilterSelection(self, selectedOption):
    """
    Makes the given filter selection, applying it to the log and reorganizing
    our filter selection.

    Arguments:
      selectedOption - regex filter we've already added, None if no filter
                       should be applied
    """
    if not selectedOption:
        return

    try:
        self.setFilter(re.compile(selectedOption))

        # bump the chosen filter to the front of our listing
        self.filterOptions.remove(selectedOption)
        self.filterOptions.insert(0, selectedOption)
    except re.error as exc:
        # shouldn't happen since we've already checked validity
        log.log(log.WARN, "Invalid regular expression ('%s': %s) - removing from listing" % (selectedOption, exc))
        self.filterOptions.remove(selectedOption)
# NOTE(review): the line below appears corrupted -- the segment between the
# "Controller Password: " prompt and the "Reconnected to Tor's control port"
# notice (presumably the password read, authentication, and connect calls, plus
# the log call wrapping that notice) seems to have been redacted or lost
# ("******" is not valid Python). The `except:` clause also references `exc`,
# which is never bound in the visible text. Preserved verbatim; this handler
# needs to be restored from upstream history before it can run.
def handleKey(self, key): isKeystrokeConsumed = True if key in (ord('n'), ord('N')) and torTools.getConn().isNewnymAvailable(): self.sendNewnym() elif key in (ord('r'), ord('R')) and not self._isTorConnected: torctlConn = None allowPortConnection, allowSocketConnection, _ = starter.allowConnectionTypes() if os.path.exists(self._config["startup.interface.socket"]) and allowSocketConnection: try: torctlConn = torTools.connect_socket(self._config["startup.interface.socket"]) except IOError, exc: if not allowPortConnection: cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3) elif not allowPortConnection: cli.popups.showMsg("Unable to reconnect (socket '%s' doesn't exist)" % self._config["startup.interface.socket"], 3) if not torctlConn and allowPortConnection: try: ctlAddr, ctlPort = self._config["startup.interface.ipAddress"], self._config["startup.interface.port"] tmpConn, authType, authValue = TorCtl.TorCtl.preauth_connect(ctlAddr, ctlPort) if authType == TorCtl.TorCtl.AUTH_TYPE.PASSWORD: authValue = cli.popups.inputPrompt("Controller Password: "******"Reconnected to Tor's control port") cli.popups.showMsg("Tor reconnected", 1) except: # displays notice for the first failed connection attempt if exc.args: cli.popups.showMsg("Unable to reconnect (%s)" % exc, 3)
def TemplateFromCommands(template, options, depth=0, path=None):
    """
    Expands template commands (include/comment/process) embedded in the given
    template text, recursing into included files.

    Fixes: included files are now read via a context manager (previously
    `open(path).read()` leaked the file handle), and the loop variables no
    longer shadow the `path` parameter used in the recursion-depth warning.

    Arguments:
      template - template text to process
      options  - mapping whose 'commands' entry describes each command's
                 prefix/postfix delimiters
      depth    - current include-recursion depth
      path     - file the template was loaded from (for diagnostics)
    """
    # guard against runaway recursion from circular includes
    if depth > 50:
        log('WARNING: Maximum depth execeeded (%s) in TemplateFromCommands: %s' % (depth, path))
        return template

    # Find Template commands in our template
    commands_found = {}
    for (command, command_data) in options['commands'].items():
        regex = '%s(.*?)%s' % (SanitizeRegex(command_data['prefix']), SanitizeRegex(command_data['postfix']))
        commands_found[command] = re.findall(regex, template)

    # Include other files
    if 'include' in commands_found:
        for include_path in commands_found['include']:
            # If this is a valid file, load it and include it (with it's own
            # TemplateFromCommands processing)
            if os.path.isfile(include_path):
                # Load the template (context manager so the handle is closed)
                with open(include_path) as include_file:
                    include_template = include_file.read()

                # Process any commands embedded in this template
                include_template = TemplateFromCommands(include_template, options, depth=depth+1, path=include_path)

                # Build the replacement string and insert into our current template
                insert_replace = '%s%s%s' % (options['commands']['include']['prefix'], include_path, options['commands']['include']['postfix'])
                template = template.replace(insert_replace, include_template)
            else:
                log('WARNING: INCLUDE path not found: %s' % include_path)

    # Comments -- Wipe them out, they are made to disappear
    if 'comment' in commands_found:
        for comment_body in commands_found['comment']:
            # Build the replacement string and empty out the Comment statement
            insert_replace = '%s%s%s' % (options['commands']['comment']['prefix'], comment_body, options['commands']['comment']['postfix'])
            template = template.replace(insert_replace, '')

    # Process TemplateMan spec in-place
    if 'process' in commands_found and commands_found['process']:
        log('ERROR: Processing commands found, but this is not yet implemented...')

    return template
def call(command, cacheAge=0, suppressExc=False, quiet=True): """ Convenience function for performing system calls, providing: - suppression of any writing to stdout, both directing stderr to /dev/null and checking for the existence of commands before executing them - logging of results (command issued, runtime, success/failure, etc) - optional exception suppression and caching (the max age for cached results is a minute) Arguments: command - command to be issued cacheAge - uses cached results rather than issuing a new request if last fetched within this number of seconds (if zero then all caching functionality is skipped) suppressExc - provides None in cases of failure if True, otherwise IOErrors are raised quiet - if True, "2> /dev/null" is appended to all commands """ # caching functionality (fetching and trimming) if cacheAge > 0: global CALL_CACHE # keeps consistency that we never use entries over a minute old (these # results are 'dirty' and might be trimmed at any time) cacheAge = min(cacheAge, 60) cacheSize = CONFIG["cache.sysCalls.size"] # if the cache is especially large then trim old entries if len(CALL_CACHE) > cacheSize: CALL_CACHE_LOCK.acquire() # checks that we haven't trimmed while waiting if len(CALL_CACHE) > cacheSize: # constructs a new cache with only entries less than a minute old newCache, currentTime = {}, time.time() for cachedCommand, cachedResult in CALL_CACHE.items(): if currentTime - cachedResult[0] < 60: newCache[cachedCommand] = cachedResult # if the cache is almost as big as the trim size then we risk doing this # frequently, so grow it and log if len(newCache) > (0.75 * cacheSize): cacheSize = len(newCache) * 2 CONFIG["cache.sysCalls.size"] = cacheSize msg = "growing system call cache to %i entries" % cacheSize log.log(CONFIG["log.sysCallCacheGrowing"], msg) CALL_CACHE = newCache CALL_CACHE_LOCK.release() # checks if we can make use of cached results if command in CALL_CACHE and time.time() - CALL_CACHE[command][0] < cacheAge: 
cachedResults = CALL_CACHE[command][1] cacheAge = time.time() - CALL_CACHE[command][0] if isinstance(cachedResults, IOError): if IS_FAILURES_CACHED: msg = "system call (cached failure): %s (age: %0.1f, error: %s)" % (command, cacheAge, str(cachedResults)) log.log(CONFIG["log.sysCallCached"], msg) if suppressExc: return None else: raise cachedResults else: # flag was toggled after a failure was cached - reissue call, ignoring the cache return call(command, 0, suppressExc, quiet) else: msg = "system call (cached): %s (age: %0.1f)" % (command, cacheAge) log.log(CONFIG["log.sysCallCached"], msg) return cachedResults startTime = time.time() commandCall, results, errorExc = None, None, None # Gets all the commands involved, taking piping into consideration. If the # pipe is quoted (ie, echo "an | example") then it's ignored. commandComp = [] for component in command.split("|"): if not commandComp or component.count("\"") % 2 == 0: commandComp.append(component) else: # pipe is within quotes commandComp[-1] += "|" + component # preprocessing for the commands to prevent anything going to stdout for i in range(len(commandComp)): subcommand = commandComp[i].strip() if not isAvailable(subcommand): errorExc = IOError("'%s' is unavailable" % subcommand.split(" ")[0]) if quiet: commandComp[i] = "%s 2> /dev/null" % subcommand # processes the system call if not errorExc: try: commandCall = os.popen(" | ".join(commandComp)) results = commandCall.readlines() except IOError, exc: errorExc = exc
# processes the system call if not errorExc: try: commandCall = os.popen(" | ".join(commandComp)) results = commandCall.readlines() except IOError, exc: errorExc = exc # make sure sys call is closed if commandCall: commandCall.close() if errorExc: # log failure and either provide None or re-raise exception msg = "system call (failed): %s (error: %s)" % (command, str(errorExc)) log.log(CONFIG["log.sysCallFailed"], msg) if cacheAge > 0 and IS_FAILURES_CACHED: CALL_CACHE_LOCK.acquire() CALL_CACHE[command] = (time.time(), errorExc) CALL_CACHE_LOCK.release() if suppressExc: return None else: raise errorExc else: # log call information and if we're caching then save the results msg = "system call: %s (runtime: %0.2f)" % (command, time.time() - startTime) log.log(CONFIG["log.sysCallMade"], msg) if cacheAge > 0: CALL_CACHE_LOCK.acquire()
def run(self):
    """
    Daemon loop that periodically samples the monitored process' CPU and
    memory usage, preferring the proc contents and falling back to ps. Sampled
    values are published to the instance's stat attributes under _valLock.
    Runs until _halt is set.
    """
    while not self._halt:
        timeSinceReset = time.time() - self.lastLookup

        # a resolveRate of zero means "paused": just idle in short intervals
        if self.resolveRate == 0:
            self._cond.acquire()
            if not self._halt: self._cond.wait(0.2)
            self._cond.release()
            continue
        elif timeSinceReset < self.resolveRate:
            # not yet due for another sample; sleep off the remainder
            sleepTime = max(0.2, self.resolveRate - timeSinceReset)
            self._cond.acquire()
            if not self._halt: self._cond.wait(sleepTime)
            self._cond.release()
            continue

        # done waiting, try again
        newValues = {}
        try:
            if self._useProc:
                utime, stime, startTime = procTools.getStats(self.processPid, procTools.Stat.CPU_UTIME, procTools.Stat.CPU_STIME, procTools.Stat.START_TIME)
                totalCpuTime = float(utime) + float(stime)
                cpuDelta = totalCpuTime - self._lastCpuTotal
                newValues["cpuSampling"] = cpuDelta / timeSinceReset
                newValues["cpuAvg"] = totalCpuTime / (time.time() - float(startTime))
                newValues["_lastCpuTotal"] = totalCpuTime

                memUsage = int(procTools.getMemoryUsage(self.processPid)[0])
                totalMemory = procTools.getPhysicalMemory()
                newValues["memUsage"] = memUsage
                newValues["memUsagePercentage"] = float(memUsage) / totalMemory
            else:
                # the ps call formats results as:
                #
                #     TIME     ELAPSED   RSS %MEM
                # 3-08:06:32 21-00:00:12 121844 23.5
                #
                # or if Tor has only recently been started:
                #
                #     TIME      ELAPSED    RSS %MEM
                #  0:04.40     37:57    18772  0.9
                psCall = call("ps -p %s -o cputime,etime,rss,%%mem" % self.processPid)

                isSuccessful = False
                if psCall and len(psCall) >= 2:
                    stats = psCall[1].strip().split()

                    if len(stats) == 4:
                        try:
                            totalCpuTime = uiTools.parseShortTimeLabel(stats[0])
                            uptime = uiTools.parseShortTimeLabel(stats[1])
                            cpuDelta = totalCpuTime - self._lastCpuTotal
                            newValues["cpuSampling"] = cpuDelta / timeSinceReset
                            newValues["cpuAvg"] = totalCpuTime / uptime
                            newValues["_lastCpuTotal"] = totalCpuTime

                            newValues["memUsage"] = int(stats[2]) * 1024  # ps size is in kb
                            newValues["memUsagePercentage"] = float(stats[3]) / 100.0
                            isSuccessful = True
                        except ValueError, exc:
                            pass

                if not isSuccessful:
                    raise IOError("unrecognized output from ps: %s" % psCall)
        except IOError, exc:
            newValues = {}
            self._failureCount += 1

            if self._useProc:
                if self._failureCount >= 3:
                    # We've failed three times resolving via proc. Warn, and fall back
                    # to ps resolutions.
                    msg = "Failed three attempts to get process resource usage from proc, falling back to ps (%s)" % exc
                    log.log(CONFIG["log.stats.procResolutionFailover"], msg)

                    self._useProc = False
                    self._failureCount = 1  # prevents lastQueryFailed() from thinking that we succeeded
                else:
                    # wait a bit and try again
                    msg = "Unable to query process resource usage from proc (%s)" % exc
                    log.log(CONFIG["log.stats.failedProcResolution"], msg)
                    self._cond.acquire()
                    if not self._halt: self._cond.wait(0.5)
                    self._cond.release()
            else:
                # exponential backoff on making failed ps calls
                sleepTime = 0.01 * (2 ** self._failureCount) + self._failureCount
                msg = "Unable to query process resource usage from ps, waiting %0.2f seconds (%s)" % (sleepTime, exc)
                log.log(CONFIG["log.stats.failedProcResolution"], msg)
                self._cond.acquire()
                if not self._halt: self._cond.wait(sleepTime)
                self._cond.release()

        # sets the new values
        if newValues:
            # If this is the first run then the cpuSampling stat is meaningless
            # (there isn't a previous tick to sample from so it's zero at this
            # point). Setting it to the average, which is a fairer estimate.
            if self.lastLookup == -1:
                newValues["cpuSampling"] = newValues["cpuAvg"]

            self._valLock.acquire()
            self.cpuSampling = newValues["cpuSampling"]
            self.cpuAvg = newValues["cpuAvg"]
            self.memUsage = newValues["memUsage"]
            self.memUsagePercentage = newValues["memUsagePercentage"]
            self._lastCpuTotal = newValues["_lastCpuTotal"]
            self.lastLookup = time.time()
            self._runCount += 1
            self._failureCount = 0
            self._valLock.release()
def loadOptionDescriptions(loadPath = None):
    """
    Fetches and parses descriptions for tor's configuration options from its
    man page. This can be a somewhat lengthy call, and raises an IOError if
    issues occure.

    If available, this can load the configuration descriptions from a file
    where they were previously persisted to cut down on the load time (latency
    for this is around 200ms).

    Arguments:
      loadPath - if set, this attempts to fetch the configuration descriptions
                 from the given path instead of the man page

    NOTE(review): in the visible code CONFIG_DESCRIPTIONS_LOCK is acquired but
    never released, and raisedExc is set but never re-raised -- presumably the
    function's tail (release + raise) was truncated in this chunk; confirm
    against the full source.
    """
    CONFIG_DESCRIPTIONS_LOCK.acquire()
    CONFIG_DESCRIPTIONS.clear()

    raisedExc = None
    try:
        if loadPath:
            # Input file is expected to be of the form:
            # <option>
            # <arg description>
            # <description, possibly multiple lines>
            # <PERSIST_ENTRY_DIVIDER>
            inputFile = open(loadPath, "r")
            inputFileContents = inputFile.readlines()
            inputFile.close()

            # constructs a reverse mapping for categories
            strToCat = dict([(OPTION_CATEGORY_STR[cat], cat) for cat in OPTION_CATEGORY_STR])

            try:
                # the first line is "Tor Version <version>"; reject the cache if
                # it was built against a different tor
                versionLine = inputFileContents.pop(0).rstrip()

                if versionLine.startswith("Tor Version "):
                    fileVersion = versionLine[12:]
                    torVersion = torTools.getConn().getInfo("version", "")

                    if fileVersion != torVersion:
                        msg = "wrong version, tor is %s but the file's from %s" % (torVersion, fileVersion)
                        raise IOError(msg)
                else:
                    raise IOError("unable to parse version")

                while inputFileContents:
                    # gets category enum, failing if it doesn't exist
                    categoryStr = inputFileContents.pop(0).rstrip()
                    if categoryStr in strToCat:
                        category = strToCat[categoryStr]
                    else:
                        baseMsg = "invalid category in input file: '%s'"
                        raise IOError(baseMsg % categoryStr)

                    # gets the position in the man page
                    indexArg, indexStr = -1, inputFileContents.pop(0).rstrip()

                    if indexStr.startswith("index: "):
                        indexStr = indexStr[7:]

                        if indexStr.isdigit():
                            indexArg = int(indexStr)
                        else:
                            raise IOError("non-numeric index value: %s" % indexStr)
                    else:
                        raise IOError("malformed index argument: %s" % indexStr)

                    option = inputFileContents.pop(0).rstrip()
                    argument = inputFileContents.pop(0).rstrip()

                    # accumulates description lines until the entry divider
                    description, loadedLine = "", inputFileContents.pop(0)
                    while loadedLine != PERSIST_ENTRY_DIVIDER:
                        description += loadedLine
                        if inputFileContents:
                            loadedLine = inputFileContents.pop(0)
                        else:
                            break

                    CONFIG_DESCRIPTIONS[option.lower()] = ManPageEntry(indexArg, category, argument, description.rstrip())
            except IndexError:
                # ran out of lines mid-entry: the persisted file is malformed
                CONFIG_DESCRIPTIONS.clear()
                raise IOError("input file format is invalid")
        else:
            manCallResults = sysTools.call("man tor")

            # Fetches all options available with this tor instance. This isn't
            # vital, and the validOptions are left empty if the call fails.
            conn, validOptions = torTools.getConn(), []
            configOptionQuery = conn.getInfo("config/names").strip().split("\n")
            if configOptionQuery:
                validOptions = [line[:line.find(" ")].lower() for line in configOptionQuery]

            optionCount, lastOption, lastArg = 0, None, None
            lastCategory, lastDescription = GENERAL, ""

            for line in manCallResults:
                line = uiTools.getPrintable(line)
                strippedLine = line.strip()

                # we have content, but an indent less than an option (ignore line)
                #if strippedLine and not line.startswith(" " * MAN_OPT_INDENT): continue

                # line starts with an indent equivilant to a new config option
                isOptIndent = line.startswith(" " * MAN_OPT_INDENT) and line[MAN_OPT_INDENT] != " "

                isCategoryLine = not line.startswith(" ") and "OPTIONS" in line

                # if this is a category header or a new option, add an entry using the
                # buffered results
                if isOptIndent or isCategoryLine:
                    # Filters the line based on if the option is recognized by tor or
                    # not. This isn't necessary for arm, so if unable to make the check
                    # then we skip filtering (no loss, the map will just have some extra
                    # noise).
                    strippedDescription = lastDescription.strip()
                    if lastOption and (not validOptions or lastOption.lower() in validOptions):
                        CONFIG_DESCRIPTIONS[lastOption.lower()] = ManPageEntry(optionCount, lastCategory, lastArg, strippedDescription)
                        optionCount += 1
                    lastDescription = ""

                    # parses the option and argument
                    line = line.strip()
                    divIndex = line.find(" ")
                    if divIndex != -1:
                        lastOption, lastArg = line[:divIndex], line[divIndex + 1:]

                    # if this is a category header then switch it
                    if isCategoryLine:
                        if line.startswith("OPTIONS"): lastCategory = GENERAL
                        elif line.startswith("CLIENT"): lastCategory = CLIENT
                        elif line.startswith("SERVER"): lastCategory = SERVER
                        elif line.startswith("DIRECTORY SERVER"): lastCategory = DIRECTORY
                        elif line.startswith("DIRECTORY AUTHORITY SERVER"): lastCategory = AUTHORITY
                        elif line.startswith("HIDDEN SERVICE"): lastCategory = HIDDEN_SERVICE
                        elif line.startswith("TESTING NETWORK"): lastCategory = TESTING
                        else:
                            msg = "Unrecognized category in the man page: %s" % line.strip()
                            log.log(CONFIG["log.configDescriptions.unrecognizedCategory"], msg)
                else:
                    # Appends the text to the running description. Empty lines and lines
                    # starting with a specific indentation are used for formatting, for
                    # instance the ExitPolicy and TestingTorNetwork entries.
                    if lastDescription and lastDescription[-1] != "\n":
                        lastDescription += " "

                    if not strippedLine:
                        lastDescription += "\n\n"
                    elif line.startswith(" " * MAN_EX_INDENT):
                        lastDescription += "    %s\n" % strippedLine
                    else:
                        lastDescription += strippedLine
    except IOError, exc:
        raisedExc = exc
def renderTorrc(template, options, commentIndent = 30):
    """
    Uses the given template to generate a nicely formatted torrc with the
    given options. The tempating language this recognizes is a simple one,
    recognizing the following options:
      [IF <option>]         # if <option> maps to true or a non-empty string
      [IF NOT <option>]     # logical inverse
      [IF <opt1> | <opt2>]  # logical or of the options
      [ELSE]                # if the prior conditional evaluated to false
      [END IF]              # ends the control block

      [<option>]            # inputs the option value, omitting the line if it maps
                            # to a boolean or empty string
      [NEWLINE]             # empty line, otherwise templating white space is ignored

    Arguments:
      template      - torrc template lines used to generate the results
      options       - mapping of keywords to their given values, with values
                      being booleans or strings (possibly multi-line)
      commentIndent - minimum column that comments align on
    """
    results = []
    templateIter = iter(template)
    commentLineFormat = "%%-%is%%s" % commentIndent

    try:
        while True:
            line = templateIter.next().strip()

            if line.startswith("[IF ") and line.endswith("]"):
                # checks if any of the conditional options are true or a non-empty string
                evaluatesTrue = False
                for cond in line[4:-1].split("|"):
                    isInverse = False
                    if cond.startswith("NOT "):
                        isInverse = True
                        cond = cond[4:]

                    if isInverse != bool(options.get(cond.strip())):
                        evaluatesTrue = True
                        break

                if evaluatesTrue:
                    # take the branch; its [END IF] will later be swallowed by the
                    # dynamic-entry case below (options.get("END IF") is falsy)
                    continue
                else:
                    # skips lines until we come to an else or the end of the block
                    depth = 0
                    while depth != -1:
                        line = templateIter.next().strip()

                        if line.startswith("[IF ") and line.endswith("]"): depth += 1
                        elif line == "[END IF]": depth -= 1
                        elif depth == 0 and line == "[ELSE]": depth -= 1
            elif line == "[ELSE]":
                # an else block we aren't using - skip to the end of it
                depth = 0
                while depth != -1:
                    line = templateIter.next().strip()

                    if line.startswith("[IF "): depth += 1
                    elif line == "[END IF]": depth -= 1
            elif line == "[NEWLINE]":
                # explicit newline
                results.append("")
            elif line.startswith("#"):
                # comment only
                results.append(line)
            elif line.startswith("[") and line.endswith("]"):
                # completely dynamic entry
                optValue = options.get(line[1:-1])
                if optValue: results.append(optValue)
            else:
                # torrc option line
                option, arg, comment = "", "", ""
                parsedLine = line

                if "#" in parsedLine:
                    parsedLine, comment = parsedLine.split("#", 1)
                    parsedLine = parsedLine.strip()
                    comment = "# %s" % comment.strip()

                # parses the argument from the option
                if " " in parsedLine.strip():
                    option, arg = parsedLine.split(" ", 1)
                    option = option.strip()
                else:
                    log.log(log.INFO, "torrc template option lacks an argument: '%s'" % line)
                    continue

                # inputs dynamic arguments
                if arg.startswith("[") and arg.endswith("]"):
                    arg = options.get(arg[1:-1])

                    # skips argument if it's false or an empty string
                    if not arg: continue

                torrcEntry = "%s %s" % (option, arg)
                if comment:
                    results.append(commentLineFormat % (torrcEntry + " ", comment))
                else:
                    results.append(torrcEntry)
    except StopIteration:
        pass

    return "\n".join(results)