Example #1
0
def _color_attr():
  """
  Provides the curses color mappings, initializing them on first use. This
  can only be called after curses.initscr().
  """

  global COLOR_ATTR

  if COLOR_ATTR is not None:
    return COLOR_ATTR  # already initialized

  if not CONFIG['features.colorInterface']:
    COLOR_ATTR = DEFAULT_COLOR_ATTR
  elif not curses.has_colors():
    log.info('setup.color_support_unavailable')
    COLOR_ATTR = DEFAULT_COLOR_ATTR
  else:
    mappings = dict(DEFAULT_COLOR_ATTR)

    for index, name in enumerate(CURSES_COLORS):
      # pair zero is reserved by curses so pairs are numbered from one, with
      # a -1 background for the terminal default (possibly transparent)

      curses.init_pair(index + 1, CURSES_COLORS[name], -1)
      mappings[name] = curses.color_pair(index + 1)

    log.info('setup.color_support_available')
    COLOR_ATTR = mappings

  return COLOR_ATTR
Example #2
0
def _color_attr():
    """
    Initializes color mappings usable by curses. This can only be done after
    calling curses.initscr().
    """

    global COLOR_ATTR

    if COLOR_ATTR is None:
        if not CONFIG['features.colorInterface']:
            COLOR_ATTR = DEFAULT_COLOR_ATTR
        elif curses.has_colors():
            attributes = dict(DEFAULT_COLOR_ATTR)
            pair_number = 0  # pair zero is reserved by curses

            for name in CURSES_COLORS:
                pair_number += 1

                # a -1 background keeps the terminal's default (possibly
                # transparent) background

                curses.init_pair(pair_number, CURSES_COLORS[name], -1)
                attributes[name] = curses.color_pair(pair_number)

            log.info('setup.color_support_available')
            COLOR_ATTR = attributes
        else:
            log.info('setup.color_support_unavailable')
            COLOR_ATTR = DEFAULT_COLOR_ATTR

    return COLOR_ATTR
Example #3
0
  def _task(self, process_pid, process_name):
    """
    Samples this process' current resource usage, replacing self._resources
    with a fresh Resources snapshot on success.

    :param process_pid: pid of the process to sample
    :param process_name: process name (unused here, part of the task interface)

    :returns: **True** if a snapshot was taken, **False** if resolution failed
    """

    try:
      # prefer proc based resolution while it keeps working, otherwise ps

      resolver = _resources_via_proc if self._use_proc else _resources_via_ps
      total_cpu_time, uptime, memory_in_bytes, memory_in_percent = resolver(process_pid)

      if self._resources:
        # NOTE(review): this is the cpu delta relative to the prior *total*
        # cpu time (a growth ratio rather than utilization per wall-clock
        # second), and it raises an uncaught ZeroDivisionError if that prior
        # total is zero - confirm this is the intended formula

        cpu_sample = (total_cpu_time - self._resources.cpu_total) / self._resources.cpu_total
      else:
        cpu_sample = 0.0  # we need a prior datapoint to give a sampling

      self._resources = Resources(
        cpu_sample = cpu_sample,
        cpu_average = total_cpu_time / uptime,
        cpu_total = total_cpu_time,
        memory_bytes = memory_in_bytes,
        memory_percent = memory_in_percent,
        timestamp = time.time(),
      )

      self._failure_count = 0
      return True
    except IOError as exc:
      self._failure_count += 1

      if self._use_proc:
        if self._failure_count >= 3:
          # We've failed three times resolving via proc. Warn, and fall back
          # to ps resolutions.

          self._use_proc = False
          self._failure_count = 0

          log.info(
            'tracker.abort_getting_resources',
            resolver = 'proc',
            response = 'falling back to ps',
            exc = exc,
          )
        else:
          log.debug('tracker.unable_to_get_resources', resolver = 'proc', exc = exc)
      else:
        if self._failure_count >= 3:
          # Three consecutive failures with ps too. Give up on further attempts.

          log.info(
            'tracker.abort_getting_resources',
            resolver = 'ps',
            response = 'giving up on getting resource usage information',
            exc = exc,
          )

          self.stop()
        else:
          log.debug('tracker.unable_to_get_resources', resolver = 'ps', exc = exc)

      return False
Example #4
0
  def __init__(self):
    """
    Sets up the log panel: subscribes to the configured tor and nyx events
    (falling back to a default set when any are unrecognized by tor),
    prepopulates past entries from tor's log file if available, and takes
    over NYX_LOGGER emission.
    """

    nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)

    logged_events = CONFIG['startup.events'].split(',')
    tor_events = tor_controller().get_info('events/names', '').split()

    # Materialize this as a list. In python 3 a bare filter() object is
    # always truthy, which made the fallback below trigger unconditionally.

    invalid_events = [event for event in logged_events if not event.startswith('NYX_') and event not in tor_events]

    if invalid_events:
      logged_events = ['NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARNING', 'NYX_ERROR']
      log.warn("Your --log argument had the following events tor doesn't recognize: %s" % ', '.join(invalid_events))

    self._event_log = nyx.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
    self._event_log_paused = None
    self._event_types = nyx.log.listen_for_events(self._register_tor_event, logged_events)
    self._log_file = nyx.log.LogFileOutput(CONFIG['features.logFile'])
    self._filter = nyx.log.LogFilters(initial_filters = CONFIG['features.log.regex'])
    self._show_duplicates = CONFIG['features.log.showDuplicateEntries']

    self._scroller = nyx.curses.Scroller()
    self._has_new_event = False
    self._last_day = nyx.log.day_count(time.time())

    # fetches past tor events from log file, if available

    if CONFIG['features.log.prepopulate']:
      log_location = nyx.log.log_file_path(tor_controller())

      if log_location:
        try:
          for entry in reversed(list(nyx.log.read_tor_log(log_location, CONFIG['features.log.prepopulateReadLimit']))):
            if entry.type in self._event_types:
              self._event_log.add(entry)
        except IOError as exc:
          log.info('Unable to read log located at %s: %s' % (log_location, exc))
        except ValueError as exc:
          log.info(str(exc))

    self._last_content_height = len(self._event_log)  # height of the rendered content when last drawn

    # merge NYX_LOGGER into us, and listen for its future events

    for event in NYX_LOGGER:
      self._register_nyx_event(event)

    NYX_LOGGER.emit = self._register_nyx_event
Example #5
0
File: log.py Project: patacca/nyx
  def __init__(self):
    """
    Sets up the log panel, registering for the events nyx was started with,
    seeding prior entries from tor's log file when available, and routing
    NYX_LOGGER output to ourselves.
    """

    nyx.panel.DaemonPanel.__init__(self, 'log', UPDATE_RATE)

    events = nyx.arguments.expand_events(CONFIG['startup.events'])

    self._event_log = nyx.log.LogGroup(CONFIG['cache.log_panel.size'], group_by_day = True)
    self._event_log_paused = None
    self._event_types = nyx.log.listen_for_events(self._register_tor_event, events)
    self._log_file = nyx.log.LogFileOutput(CONFIG['features.logFile'])
    self._filter = nyx.log.LogFilters(initial_filters = CONFIG['features.log.regex'])
    self._show_duplicates = CONFIG['features.log.showDuplicateEntries']

    self._scroller = nyx.curses.Scroller()
    self._has_new_event = False
    self._last_day = nyx.log.day_count(time.time())

    # Seed the panel with past tor events from its log file, when we can both
    # find and parse it.

    if CONFIG['features.log.prepopulate']:
      tor_log_path = nyx.log.log_file_path(tor_controller())

      if tor_log_path:
        read_limit = CONFIG['features.log.prepopulateReadLimit']

        try:
          for prior_entry in reversed(list(nyx.log.read_tor_log(tor_log_path, read_limit))):
            if prior_entry.type in self._event_types:
              self._event_log.add(prior_entry)
        except IOError as exc:
          log.info('Unable to read log located at %s: %s' % (tor_log_path, exc))
        except ValueError as exc:
          log.info(str(exc))

    self._last_content_height = len(self._event_log)  # height of the rendered content when last drawn

    # Absorb events NYX_LOGGER already accumulated, then hook its emit so
    # future events come to us.

    for prior_event in NYX_LOGGER:
      self._register_nyx_event(prior_event)

    NYX_LOGGER.emit = self._register_nyx_event
Example #6
0
  def _task(self, process_pid, process_name):
    """
    Resolves processes for the ports that have been requested of us, reusing
    cached results from our previous lookup where possible.
    """

    local_ports = self._last_requested_local_ports
    remote_ports = self._last_requested_remote_ports

    if not (local_ports or remote_ports):
      return True  # nothing requested

    result = {}

    # Satisfy what we can from the prior lookup, consuming those ports from
    # the request lists so only the remainder needs a fresh resolution.

    for port, process in self._processes_for_ports.items():
      if port in local_ports:
        local_ports.remove(port)
      elif port in remote_ports:
        remote_ports.remove(port)
      else:
        continue

      result[port] = process

    try:
      if local_ports or remote_ports:
        result.update(_process_for_ports(local_ports, remote_ports))

      self._processes_for_ports = result
      self._failure_count = 0
      return True
    except IOError as exc:
      self._failure_count += 1

      if self._failure_count < 3:
        log.debug('tracker.unable_to_get_port_usages', exc = exc)
      else:
        log.info('tracker.abort_getting_port_usage', exc = exc)
        self.stop()

      return False
Example #7
0
  def __init__(self, rate):
    """
    Sets up connection tracking state and picks the resolvers available to us.

    :param rate: seconds between connection resolutions
    """

    super(ConnectionTracker, self).__init__(rate)

    self._connections = []
    self._start_times = {}  # maps a connection to its (unix_timestamp, is_legacy)
    self._custom_resolver = None
    self._is_first_run = True

    # Consecutive counts of resolver failures and of lookups that ran too
    # slowly for our present rate.

    self._failure_count = 0
    self._rate_too_low_count = 0

    # Normal connection resolution requires 'DisableDebuggerAttachment 0'.
    # Without that, inferring connections is the only game in town.

    can_resolve = tor_controller().get_conf('DisableDebuggerAttachment', None) == '0'
    self._resolvers = connection.system_resolvers() if can_resolve else [CustomResolver.INFERENCE]

    log.info('tracker.available_resolvers', os = os.uname()[0], resolvers = ', '.join(self._resolvers))
Example #8
0
  def _task(self, process_pid, process_name):
    """
    Refreshes self._connections with tor's present connections, failing over
    to other resolvers or slowing our rate when lookups fail or run long.

    :param process_pid: pid given to the connection resolver
    :param process_name: process name given to the connection resolver

    :returns: **True** if connections were resolved, **False** otherwise
    """

    # an explicitly set resolver trumps the platform defaults

    if self._custom_resolver:
      resolver = self._custom_resolver
      is_default_resolver = False
    elif self._resolvers:
      resolver = self._resolvers[0]
      is_default_resolver = True
    else:
      return False  # nothing to resolve with

    try:
      start_time = time.time()
      new_connections, new_start_times = [], {}

      if resolver == CustomResolver.INFERENCE:
        # provide connections going to a relay or one of our tor ports

        connections = []
        controller = tor_controller()
        consensus_tracker = get_consensus_tracker()

        for conn in proc.connections(user = controller.get_user(None)):
          if conn.remote_port in consensus_tracker.get_relay_fingerprints(conn.remote_address):
            connections.append(conn)  # outbound to another relay
          elif conn.local_port in controller.get_ports(stem.control.Listener.OR, []):
            connections.append(conn)  # inbound to our ORPort
          elif conn.local_port in controller.get_ports(stem.control.Listener.DIR, []):
            connections.append(conn)  # inbound to our DirPort
          elif conn.local_port in controller.get_ports(stem.control.Listener.CONTROL, []):
            connections.append(conn)  # controller connection
      else:
        connections = connection.get_connections(resolver, process_pid = process_pid, process_name = process_name)

      # Carry over each connection's first-seen time, flagging anything that
      # was already present on our first pass as legacy (it predates us).

      for conn in connections:
        conn_start_time, is_legacy = self._start_times.get(conn, (start_time, self._is_first_run))
        new_start_times[conn] = (conn_start_time, is_legacy)
        new_connections.append(Connection(conn_start_time, is_legacy, *conn))

      self._connections = new_connections
      self._start_times = new_start_times
      self._is_first_run = False

      runtime = time.time() - start_time

      if is_default_resolver:
        self._failure_count = 0

      # Reduce our rate if connection resolution is taking a long time. This is
      # most often an issue for extremely busy relays.

      min_rate = 100 * runtime

      if self.get_rate() < min_rate:
        self._rate_too_low_count += 1

        if self._rate_too_low_count >= 3:
          min_rate += 1  # little extra padding so we don't frequently update this
          self.set_rate(min_rate)
          self._rate_too_low_count = 0
          log.debug('tracker.lookup_rate_increased', seconds = "%0.1f" % min_rate)
      else:
        self._rate_too_low_count = 0

      return True
    except IOError as exc:
      log.info('wrap', text = exc)

      # Fail over to another resolver if we've repeatedly been unable to use
      # this one.

      if is_default_resolver:
        self._failure_count += 1

        if self._failure_count >= 3:
          self._resolvers.pop(0)
          self._failure_count = 0

          if self._resolvers:
            log.notice(
              'tracker.unable_to_use_resolver',
              old_resolver = resolver,
              new_resolver = self._resolvers[0],
            )
          else:
            log.notice('tracker.unable_to_use_all_resolvers')

      return False
Example #9
0
    def __init__(self):
        """
        Sets up the log panel: determines which tor and nyx events to listen
        for (expanding aliases and falling back to a default set when any are
        unrecognized by tor), prepopulates past entries from tor's log file
        when available, and takes over NYX_LOGGER emission.
        """

        nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)

        logged_events = list(map(str.strip,
                                 CONFIG['logged_events'].split(',')))

        # replace any configured aliases with the event names tor recognizes

        for alias, actual_event in EVENT_ALIASES.items():
            if alias in logged_events:
                logged_events.remove(alias)
                logged_events.append(actual_event)

        tor_events = tor_controller().get_info('events/names', '').split()
        invalid_events = list(
            filter(
                lambda event: not event.startswith('NYX_') and event not in
                tor_events, logged_events))

        # fall back to a sane default set if anything wasn't recognized

        if invalid_events:
            logged_events = [
                'NOTICE', 'WARN', 'ERR', 'NYX_NOTICE', 'NYX_WARNING',
                'NYX_ERROR'
            ]
            log.warn(
                "Your --log argument had the following events tor doesn't recognize: %s"
                % ', '.join(invalid_events))

        self._event_log = nyx.log.LogGroup(CONFIG['max_log_size'])
        self._event_log_paused = None
        self._event_types = nyx.log.listen_for_events(self._register_tor_event,
                                                      logged_events)
        self._log_file = nyx.log.LogFileOutput(CONFIG['write_logs_to'])
        self._filter = nyx.log.LogFilters(
            initial_filters=CONFIG['logging_filter'])
        self._show_duplicates = not CONFIG['deduplicate_log']

        self._scroller = nyx.curses.Scroller()
        self._has_new_event = False
        self._last_day = nyx.log.day_count(time.time())

        # fetches past tor events from log file, if available

        if CONFIG['prepopulate_log']:
            log_location = nyx.log.log_file_path(tor_controller())

            if log_location:
                try:
                    for entry in reversed(
                            list(
                                nyx.log.read_tor_log(
                                    log_location,
                                    CONFIG['prepopulate_read_limit']))):
                        if entry.type in self._event_types:
                            self._event_log.add(entry)
                except IOError as exc:
                    log.info('Unable to read log located at %s: %s' %
                             (log_location, exc))
                except ValueError as exc:
                    log.info(str(exc))

        self._last_content_height = len(
            self._event_log)  # height of the rendered content when last drawn

        # merge NYX_LOGGER into us, and listen for its future events

        while not NYX_LOG_BUFFER.empty():
            self._register_nyx_event(NYX_LOG_BUFFER.get_nowait())

        NYX_LOGGER.emit = self._register_nyx_event