def bandwidth_event(self, event):
    """
    Responds to a bandwidth event by re-sampling the resource tracker and
    refreshing the graphed statistics and their header labels.
    """

    resources = nyx.tracker.get_resource_tracker().get_value()

    # cpu_sample is a 0.0 - 1.0 fraction; graph it as a whole-number percentage
    self.primary.update(resources.cpu_sample * 100)
    self.secondary.update(resources.memory_bytes)

    self._primary_header_stats = [
        '%0.1f%%' % self.primary.latest_value,
        ', avg: %0.1f%%' % self.primary.average(),
    ]

    self._secondary_header_stats = [
        str_tools.size_label(self.secondary.latest_value, 1),
        ', avg: %s' % str_tools.size_label(self.secondary.average(), 1),
    ]
def _size_label(byte_count, decimal = 1):
    """
    Alias for str_tools.size_label() that accounts for if the user prefers bits
    or bytes.
    """

    prefer_bits = CONFIG['show_bits']
    return str_tools.size_label(byte_count, decimal, is_bytes = not prefer_bits, round = True)
def tutorial_example():
    """
    Fetches server descriptors and prints the fifteen exit relays with the
    highest observed bandwidth.
    """

    from stem.descriptor.remote import DescriptorDownloader
    from stem.util import str_tools

    def get_bw_to_relay():
        # maps observed bandwidth to the nicknames of exits with that bandwidth
        bw_to_relay = {}
        downloader = DescriptorDownloader()

        try:
            for desc in downloader.get_server_descriptors().run():
                if desc.exit_policy.is_exiting_allowed():
                    bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
        except Exception as exc:
            print('Unable to retrieve the server descriptors: %s' % exc)

        return bw_to_relay

    # print the top fifteen relays, fastest first
    bw_to_relay = get_bw_to_relay()
    count = 1

    for bw_value in sorted(bw_to_relay, reverse = True):
        for nickname in bw_to_relay[bw_value]:
            print('%i. %s (%s/s)' % (count, nickname, str_tools.size_label(bw_value, 2)))
            count += 1

            if count > 15:
                return
def _size_label(byte_count, decimal = 1):
    """
    Alias for str_tools.size_label() that accounts for if the user prefers bits
    or bytes.
    """

    in_bytes = CONFIG['features.graph.bw.transferInBytes']
    return str_tools.size_label(byte_count, decimal, is_bytes = in_bytes)
def bandwidth_event(self, event):
    """
    Updates our CPU and memory graphs from the latest resource tracker sample,
    along with the header statistics shown above them.
    """

    resources = nyx.tracker.get_resource_tracker().get_value()
    primary, secondary = self.primary, self.secondary

    # scale the 0.0 - 1.0 cpu fraction up to a whole-number percentage
    primary.update(resources.cpu_sample * 100)
    secondary.update(resources.memory_bytes)

    latest_cpu = '%0.1f%%' % primary.latest_value
    average_cpu = ', avg: %0.1f%%' % primary.average()
    self._primary_header_stats = [latest_cpu, average_cpu]

    latest_memory = str_tools.size_label(secondary.latest_value, 1)
    average_memory = ', avg: %s' % str_tools.size_label(secondary.average(), 1)
    self._secondary_header_stats = [latest_memory, average_memory]
def tutorial_example():
    """
    Downloads current server descriptors and reports the top fifteen exit
    relays by observed bandwidth.
    """

    from stem.descriptor.remote import DescriptorDownloader
    from stem.util import str_tools

    def get_bw_to_relay():
        # provides a mapping of observed bandwidth to the relay nicknames
        bw_to_relay = {}
        downloader = DescriptorDownloader()

        try:
            for desc in downloader.get_server_descriptors().run():
                if not desc.exit_policy.is_exiting_allowed():
                    continue

                bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
        except Exception as exc:
            print('Unable to retrieve the server descriptors: %s' % exc)

        return bw_to_relay

    # prints the top fifteen relays
    bw_to_relay = get_bw_to_relay()
    count = 1

    for bw_value in sorted(bw_to_relay.keys(), reverse = True):
        for nickname in bw_to_relay[bw_value]:
            print('%i. %s (%s/s)' % (count, nickname, str_tools.size_label(bw_value, 2)))

            if count >= 15:
                return

            count += 1
def _render_graph(window, bandwidth_rates):
    """
    Draws side-by-side download/upload bar graphs of the given rates.

    :param window: curses window to draw into
    :param bandwidth_rates: list of (download_rate, upload_rate) tuples, most
      recent sample first
    """

    window.erase()

    download_rates = [entry[0] for entry in bandwidth_rates]
    upload_rates = [entry[1] for entry in bandwidth_rates]

    # show the latest values at the top

    label = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1)
    window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD)

    label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1)
    window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD)

    # draw the graph bounds in KB

    max_download_rate = max(download_rates)
    max_upload_rate = max(upload_rates)

    window.addstr(1, 1, "%4i" % (max_download_rate / 1024), DOWNLOAD_COLOR)
    window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR)

    window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR)
    window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR)

    # draw the graph

    for col in range(GRAPH_WIDTH):
        # bug fix: use floor division — true division ('/') yields a float on
        # Python 3 and range() raises TypeError for float arguments; '//' also
        # matches the integer-division behavior this code had under Python 2
        col_height = GRAPH_HEIGHT * download_rates[col] // max(max_download_rate, 1)

        for row in range(col_height):
            window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT)

        col_height = GRAPH_HEIGHT * upload_rates[col] // max(max_upload_rate, 1)

        for row in range(col_height):
            window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT)

    window.refresh()
def _size_label(byte_count, decimal=1): """ Alias for str_tools.size_label() that accounts for if the user prefers bits or bytes. """ return str_tools.size_label( byte_count, decimal, is_bytes=CONFIG['features.graph.bw.transferInBytes'])
def _render_graph(window, bandwidth_rates):
    """
    Renders paired download/upload bar graphs for the sampled rates.

    :param window: curses window to draw into
    :param bandwidth_rates: list of (download_rate, upload_rate) tuples, most
      recent sample first
    """

    window.erase()

    download_rates = [entry[0] for entry in bandwidth_rates]
    upload_rates = [entry[1] for entry in bandwidth_rates]

    # show the latest values at the top

    label = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1)
    window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD)

    label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1)
    window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD)

    # draw the graph bounds in KB

    max_download_rate = max(download_rates)
    max_upload_rate = max(upload_rates)

    window.addstr(1, 1, "%4i" % (max_download_rate / 1024), DOWNLOAD_COLOR)
    window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR)

    window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR)
    window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR)

    # draw the graph

    # portability fix: xrange() was removed in Python 3 — range() here is
    # behaviorally identical; '//' keeps Python 2's integer division so the
    # bar heights stay ints (range() rejects floats on Python 3)
    for col in range(GRAPH_WIDTH):
        col_height = GRAPH_HEIGHT * download_rates[col] // max(max_download_rate, 1)

        for row in range(col_height):
            window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT)

        col_height = GRAPH_HEIGHT * upload_rates[col] // max(max_upload_rate, 1)

        for row in range(col_height):
            window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT)

    window.refresh()
def test_size_label(self):
    """
    Checks the size_label() function.
    """

    # the pydoc examples

    pydoc_cases = (
        ('1 MB', (2000000,), {}),
        ('1.02 KB', (1050, 2), {}),
        ('1.025 Kilobytes', (1050, 3, True), {}),
    )

    for expected, args, kwargs in pydoc_cases:
        self.assertEqual(expected, str_tools.size_label(*args, **kwargs))

    # zero, negative, and bit-based labels

    edge_cases = (
        ('0 B', (0,), {}),
        ('0 Bytes', (0,), {'is_long': True}),
        ('0.00 B', (0, 2), {}),
        ('-10 B', (-10,), {}),
        ('80 b', (10,), {'is_bytes': False}),
        ('-1 MB', (-2000000,), {}),
    )

    for expected, args, kwargs in edge_cases:
        self.assertEqual(expected, str_tools.size_label(*args, **kwargs))

    # checking that we round down
    self.assertEqual('23.43 Kb', str_tools.size_label(3000, 2, is_bytes = False))

    # non-numeric input is rejected
    for invalid_arg in (None, 'hello world'):
        self.assertRaises(TypeError, str_tools.size_label, invalid_arg)
def test_size_label(self):
    """
    Checks the size_label() function.
    """

    label = str_tools.size_label

    # the pydoc examples
    self.assertEqual('1 MB', label(2000000))
    self.assertEqual('1.02 KB', label(1050, 2))
    self.assertEqual('1.025 Kilobytes', label(1050, 3, True))

    # zero and negative byte counts
    self.assertEqual('0 B', label(0))
    self.assertEqual('0 Bytes', label(0, is_long=True))
    self.assertEqual('0.00 B', label(0, 2))
    self.assertEqual('-10 B', label(-10))
    self.assertEqual('80 b', label(10, is_bytes=False))
    self.assertEqual('-1 MB', label(-2000000))

    # checking that we round down
    self.assertEqual('23.43 Kb', label(3000, 2, is_bytes=False))

    # non-numeric input raises a TypeError
    self.assertRaises(TypeError, label, None)
    self.assertRaises(TypeError, label, 'hello world')
def value(self):
    """
    Provides the value of this configuration option.

    :returns: **str** representation of the current config value
    """

    values = tor_controller().get_conf(self.name, [], True)

    if not values:
        return '<none>'

    first = values[0]

    # render recognized types in a friendlier form

    if self.value_type == 'Boolean' and first in ('0', '1'):
        return 'True' if first == '1' else 'False'
    elif self.value_type == 'DataSize' and first.isdigit():
        return str_tools.size_label(int(first))
    elif self.value_type == 'TimeInterval' and first.isdigit():
        return str_tools.time_label(int(first), is_long = True)

    return ', '.join(values)
def value(self):
    """
    Provides the value of this configuration option.

    :returns: **str** representation of the current config value
    """

    values = tor_controller().get_conf(self.name, [], True)
    result = ', '.join(values)

    # recognized types get a nicer representation than the raw string

    if not values:
        result = '<none>'
    elif self.value_type == 'Boolean' and values[0] in ('0', '1'):
        result = 'True' if values[0] == '1' else 'False'
    elif self.value_type == 'DataSize' and values[0].isdigit():
        result = str_tools.size_label(int(values[0]))
    elif self.value_type == 'TimeInterval' and values[0].isdigit():
        result = str_tools.time_label(int(values[0]), is_long=True)

    return result
def _get_value(self):
    """
    Provides the current value of the configuration entry, taking advantage of
    the tor_tools caching to effectively query the accurate value. This uses
    the value's type to provide a user friendly representation if able.
    """

    raw_value = ', '.join(tor_controller().get_conf(self.get(Field.OPTION), [], True))

    # provide nicer values for recognized types

    if not raw_value:
        return '<none>'
    elif self.get(Field.TYPE) == 'Boolean' and raw_value in ('0', '1'):
        return 'True' if raw_value == '1' else 'False'
    elif self.get(Field.TYPE) == 'DataSize' and raw_value.isdigit():
        return str_tools.size_label(int(raw_value))
    elif self.get(Field.TYPE) == 'TimeInterval' and raw_value.isdigit():
        return str_tools.time_label(int(raw_value), is_long = True)
    else:
        return raw_value
def _Config_DropDown(self):
    """
    Populates the configuration drop-down from tor's 'config/names' listing,
    recording each option's type and display value before refreshing the
    currently selected entry.
    """

    for line in self.controller.get_info('config/names').splitlines():
        line_comp = line.split()
        name, value_type = line_comp[0], line_comp[1]
        values = self.controller.get_conf(name, [], True)

        # render recognized value types in a friendlier form

        if not values:
            display_value = '<none>'
        elif value_type == 'Boolean' and values[0] in ('0', '1'):
            display_value = 'False' if values[0] == '0' else 'True'
        elif value_type == 'DataSize' and values[0].isdigit():
            display_value = str_tools.size_label(int(values[0]))
        elif value_type == 'TimeInterval' and values[0].isdigit():
            display_value = str_tools.time_label(int(values[0]), is_long=True)
        else:
            display_value = values[0]

        Value_Type.update({name: value_type})
        Configurations.update({name: display_value})
        self.ui.Config_Options.addItem(name)

    self._Config_CurrentVal()
def _y_axis_label(self, value, is_primary):
    # the primary graph is in percentages while the secondary is in byte sizes

    if is_primary:
        return '%i%%' % value

    return str_tools.size_label(value)
import sys

import stem.descriptor.remote

from stem.util import str_tools


# provides a mapping of observed bandwidth to the relay nicknames

def get_bw_to_relay():
    bw_to_relay = {}

    try:
        for desc in stem.descriptor.remote.get_server_descriptors().run():
            if desc.exit_policy.is_exiting_allowed():
                bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
    except Exception as exc:
        print("Unable to retrieve the server descriptors: %s" % exc)

    return bw_to_relay


# prints the top fifteen relays

bw_to_relay = get_bw_to_relay()
count = 1

for bw_value in sorted(bw_to_relay.keys(), reverse = True):
    for nickname in bw_to_relay[bw_value]:
        print("%i. %s (%s/s)" % (count, nickname, str_tools.size_label(bw_value, 2)))
        count += 1

        if count > 15:
            # bug fix: sys.exit() was called without importing sys, which
            # raised a NameError once fifteen relays had been printed
            sys.exit()
def validate(contents = None):
    """
    Performs validation on the given torrc contents, providing back a listing
    of (line number, issue, msg) tuples for issues found. If the issue occurs
    on a multiline torrc entry then the line number is for the last line of
    the entry.

    Arguments:
      contents - torrc contents
    """

    controller = tor_controller()
    custom_options = get_custom_options()
    issues_found, seen_options = [], []

    # Strips comments and collapses multi-line entries, for more information
    # see: https://trac.torproject.org/projects/tor/ticket/1929

    stripped_contents, multiline_buffer = [], ''

    for line in _strip_comments(contents):
        if not line:
            stripped_contents.append('')
        else:
            line = multiline_buffer + line
            multiline_buffer = ''

            if line.endswith('\\'):
                # entry continues on the next line; buffer it and emit a blank
                # placeholder so line numbering stays aligned with the torrc
                multiline_buffer = line[:-1]
                stripped_contents.append('')
            else:
                stripped_contents.append(line.strip())

    # iterate bottom-up so duplicate options report the earlier occurrence
    for line_number in range(len(stripped_contents) - 1, -1, -1):
        line_text = stripped_contents[line_number]

        if not line_text:
            continue

        line_comp = line_text.split(None, 1)

        if len(line_comp) == 2:
            option, value = line_comp
        else:
            option, value = line_text, ''

        # Tor is case insensitive when parsing its torrc. This poses a bit of
        # an issue for us because we want all of our checks to be case
        # insensitive too but also want messages to match the normal
        # camel-case conventions.
        #
        # Using the custom_options to account for this. It contains the tor
        # reported options (camel case) and is either a matching set or the
        # following default value check will fail. Hence using that hash to
        # correct the case.
        #
        # TODO: when refactoring for stem make this less confusing...

        for custom_opt in custom_options:
            if custom_opt.lower() == option.lower():
                option = custom_opt
                break

        # if an aliased option then use its real name
        if option in CONFIG['torrc.alias']:
            option = CONFIG['torrc.alias'][option]

        # most parameters are overwritten if defined multiple times
        if option in seen_options and option not in get_multiline_parameters():
            issues_found.append((line_number, ValidationError.DUPLICATE, option))
            continue
        else:
            seen_options.append(option)

        # checks if the value isn't necessary due to matching the defaults
        if option not in custom_options:
            issues_found.append((line_number, ValidationError.IS_DEFAULT, option))

        # replace aliases with their recognized representation
        #
        # NOTE(review): aliases were already substituted a few lines above, so
        # this is a no-op unless an alias maps to another alias — confirm
        # whether one of these substitutions is redundant
        if option in CONFIG['torrc.alias']:
            option = CONFIG['torrc.alias'][option]

        # tor appears to replace tabs with a space, for instance:
        # "accept\t*:563" is read back as "accept *:563"
        value = value.replace('\t', ' ')

        # parse value if it's a size or time, expanding the units
        value, value_type = _parse_conf_value(value)

        # issues GETCONF to get the values tor's currently configured to use
        tor_values = controller.get_conf(option, [], True)

        # multiline entries can be comma separated values (for both tor and
        # conf)

        value_list = [value]

        if option in get_multiline_parameters():
            value_list = [val.strip() for val in value.split(',')]

            # flatten and de-duplicate tor's comma separated replies so each
            # configured entry can be matched individually
            fetched_values, tor_values = tor_values, []
            for fetched_value in fetched_values:
                for fetched_entry in fetched_value.split(','):
                    fetched_entry = fetched_entry.strip()

                    if fetched_entry not in tor_values:
                        tor_values.append(fetched_entry)

        for val in value_list:
            # checks if both the argument and tor's value are empty
            is_blank_match = not val and not tor_values

            if not is_blank_match and val not in tor_values:
                # converts corrections to reader friendly size values
                display_values = tor_values

                if value_type == ValueType.SIZE:
                    display_values = [str_tools.size_label(int(val)) for val in tor_values]
                elif value_type == ValueType.TIME:
                    display_values = [str_tools.time_label(int(val)) for val in tor_values]

                issues_found.append((line_number, ValidationError.MISMATCH, ', '.join(display_values)))

    # checks if any custom options are missing from the torrc

    for option in custom_options:
        # In new versions the 'DirReqStatistics' option is true by default and
        # disabled on startup if geoip lookups are unavailable. If this option
        # is missing then that's most likely the reason.
        #
        # https://trac.torproject.org/projects/tor/ticket/4237

        if option == 'DirReqStatistics':
            continue

        if option not in seen_options:
            issues_found.append((None, ValidationError.MISSING, option))

    return issues_found
import sys

# bug fix: this snippet calls stem.descriptor.remote.get_server_descriptors()
# but only imported str_tools — 'from stem.util import str_tools' does not
# bind the name 'stem', so the call raised a NameError; sys was also missing
import stem.descriptor.remote

from stem.util import str_tools


# provides a mapping of observed bandwidth to the relay nicknames

def get_bw_to_relay():
    bw_to_relay = {}

    try:
        for desc in stem.descriptor.remote.get_server_descriptors().run():
            if desc.exit_policy.is_exiting_allowed():
                bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
    except Exception as exc:
        print('Unable to retrieve the server descriptors: %s' % exc)

    return bw_to_relay


# prints the top fifteen relays

bw_to_relay = get_bw_to_relay()
count = 1

for bw_value in sorted(bw_to_relay.keys(), reverse = True):
    for nickname in bw_to_relay[bw_value]:
        print('%i. %s (%s/s)' % (count, nickname, str_tools.size_label(bw_value, 2)))
        count += 1

        if count > 15:
            sys.exit()
def get_sampling(last_sampling = None):
    """
    Provides a Sampling namedtuple snapshot of tor and nyx state (connection
    info, relay attributes, and resource usage) for the header panel.

    :param last_sampling: previous Sampling, used to derive nyx's cpu usage
      since that snapshot; nyx_cpu is 0.0 when this is None

    :returns: dynamically built Sampling namedtuple with a format() helper
    """

    controller = tor_controller()
    retrieved = time.time()
    pid = controller.get_pid('')
    tor_resources = tracker.get_resource_tracker().get_value()

    # user + system + children cpu time for the nyx process itself
    nyx_total_cpu_time = sum(os.times()[:3])

    or_listeners = controller.get_listeners(Listener.OR, [])
    control_listeners = controller.get_listeners(Listener.CONTROL, [])

    # determine how the control port is authenticated

    if controller.get_conf('HashedControlPassword', None):
        auth_type = 'password'
    elif controller.get_conf('CookieAuthentication', None) == '1':
        auth_type = 'cookie'
    else:
        auth_type = 'open'

    # file descriptor usage is unavailable on some platforms (proc-based)

    try:
        fd_used = proc.file_descriptors_used(pid)
    except IOError:
        fd_used = None

    # nyx's cpu usage is the cpu time spent since the last sampling divided by
    # the wall-clock time elapsed

    if last_sampling:
        nyx_cpu_delta = nyx_total_cpu_time - last_sampling.nyx_total_cpu_time
        nyx_time_delta = retrieved - last_sampling.retrieved
        python_cpu_time = nyx_cpu_delta / nyx_time_delta
        sys_call_cpu_time = 0.0  # TODO: add a wrapper around call() to get this
        nyx_cpu = python_cpu_time + sys_call_cpu_time
    else:
        nyx_cpu = 0.0

    attr = {
        'retrieved': retrieved,
        'is_connected': controller.is_alive(),
        'connection_time': controller.connection_time(),
        'last_heartbeat': time.strftime('%H:%M %m/%d/%Y', time.localtime(controller.get_latest_heartbeat())),
        'fingerprint': controller.get_info('fingerprint', 'Unknown'),
        'nickname': controller.get_conf('Nickname', ''),
        'newnym_wait': controller.get_newnym_wait(),
        'exit_policy': controller.get_exit_policy(None),
        'flags': getattr(controller.get_network_status(default = None), 'flags', []),
        'version': str(controller.get_version('Unknown')).split()[0],
        'version_status': controller.get_info('status/version/current', 'Unknown'),
        # prefer the OR listener address unless it's the unspecified 0.0.0.0
        'address': or_listeners[0][0] if (or_listeners and or_listeners[0][0] != '0.0.0.0') else controller.get_info('address', 'Unknown'),
        'or_port': or_listeners[0][1] if or_listeners else '',
        'dir_port': controller.get_conf('DirPort', '0'),
        'control_port': str(control_listeners[0][1]) if control_listeners else None,
        'socket_path': controller.get_conf('ControlSocket', None),
        'is_relay': bool(or_listeners),
        'auth_type': auth_type,
        'pid': pid,
        'start_time': system.start_time(pid),
        'fd_limit': int(controller.get_info('process/descriptor-limit', '-1')),
        'fd_used': fd_used,
        'nyx_total_cpu_time': nyx_total_cpu_time,
        'tor_cpu': '%0.1f' % (100 * tor_resources.cpu_sample),
        'nyx_cpu': nyx_cpu,
        # NOTE(review): falls back to integer 0 rather than a '0 B' label when
        # no memory usage is reported — confirm consumers handle both types
        'memory': str_tools.size_label(tor_resources.memory_bytes) if tor_resources.memory_bytes > 0 else 0,
        'memory_percent': '%0.1f' % (100 * tor_resources.memory_percent),
        'hostname': os.uname()[1],
        'platform': '%s %s' % (os.uname()[0], os.uname()[2]),  # [platform name] [version]
    }

    # namedtuple built from the attr keys so new fields are picked up
    # automatically; format() substitutes them into header templates

    class Sampling(collections.namedtuple('Sampling', attr.keys())):
        def __init__(self, **attr):
            super(Sampling, self).__init__(**attr)
            self._attr = attr  # keep the raw dict for str.format() substitution

        def format(self, message, crop_width = None):
            # renders a '{field}' template, optionally cropped to fit the screen
            formatted_msg = message.format(**self._attr)

            if crop_width:
                formatted_msg = str_tools.crop(formatted_msg, crop_width)

            return formatted_msg

    return Sampling(**attr)
# bug fix: this snippet referenced stem.descriptor.remote, str_tools, and sys
# without importing any of them, so it raised a NameError when run standalone
import sys

import stem.descriptor.remote

from stem.util import str_tools


# provides a mapping of observed bandwidth to the relay nicknames

def get_bw_to_relay():
    bw_to_relay = {}

    try:
        for desc in stem.descriptor.remote.get_server_descriptors().run():
            if desc.exit_policy.is_exiting_allowed():
                bw_to_relay.setdefault(desc.observed_bandwidth, []).append(desc.nickname)
    except Exception as exc:
        print("Unable to retrieve the server descriptors: %s" % exc)

    return bw_to_relay


# prints the top fifteen relays

bw_to_relay = get_bw_to_relay()
count = 1

for bw_value in sorted(bw_to_relay.keys(), reverse=True):
    for nickname in bw_to_relay[bw_value]:
        print("%i. %s (%s/s)" % (count, nickname, str_tools.size_label(bw_value, 2)))
        count += 1

        if count > 15:
            sys.exit()