Example #1
  def __init__(self):
    self._log = grok_logging.getExtendedLogger(self.__class__.__name__)

    self._profiling = (
      grok.app.config.getboolean("debugging", "profiling") or
      self._log.isEnabledFor(logging.DEBUG))

    self._pollInterval = grok.app.config.getfloat(
      "metric_collector", "poll_interval")

    self._metricErrorGracePeriod = grok.app.config.getfloat(
      "metric_collector", "metric_error_grace_period")

    # Interval for periodic garbage collection of our caches (e.g.,
    # self._metricInfoCache and self._resourceInfoCache)
    self._cacheGarbageCollectionIntervalSec = self._metricErrorGracePeriod * 2

    # Time (unix epoch) when to run the next garbage collection of
    # self._metricInfoCache and self._resourceInfoCache; 0 triggers it ASAP as a
    # quick test of the logic. See self._cacheGarbageCollectionIntervalSec.
    self._nextCacheGarbageCollectionTime = 0

    # We use this to cache info about metrics that helps us avoid unnecessary
    # queries to the datasource. The keys are metric uids and the corresponding
    # values are _MetricInfoCacheItem objects.
    self._metricInfoCache = defaultdict(_MetricInfoCacheItem)

    # We use this to cache info about resources referenced by metrics to help
    # us avoid unnecessary resource-status queries to the datasource. The keys
    # are resource canonical identifiers (from Metric.server) and
    # corresponding values are _ResourceInfoCacheItem objects.
    self._resourceInfoCache = defaultdict(_ResourceInfoCacheItem)

    self.metricStreamer = MetricStreamer()
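
The two caches above lean on collections.defaultdict with a class as the factory, so looking up an unseen metric uid transparently creates a fresh cache item. A minimal, self-contained sketch of that pattern; the fields on this _MetricInfoCacheItem are hypothetical stand-ins, not the real class:

from collections import defaultdict
import time


class _MetricInfoCacheItem(object):
  """Hypothetical stand-in: tracks when a metric was last checked."""

  def __init__(self):
    self.lastCheckedTime = 0.0
    self.quarantined = False


metricInfoCache = defaultdict(_MetricInfoCacheItem)

# The first lookup of an unknown uid creates a default item on the fly.
item = metricInfoCache["metric-uid-123"]
item.lastCheckedTime = time.time()

print(len(metricInfoCache))  # 1 -- the lookup itself populated the cache
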
Example #2
def runServer():
  # Get the current list of custom metrics
  appConfig = Config("application.conf",
                     os.environ["APPLICATION_CONFIG_PATH"])

  engine = repository.engineFactory(appConfig)
  global gCustomMetrics
  now = datetime.datetime.utcnow()

  with engine.connect() as conn:
    gCustomMetrics = dict(
      (m.name, [m, now]) for m in repository.getCustomMetrics(conn))

  queueName = appConfig.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (appConfig.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))
  del appConfig

  metricStreamer = MetricStreamer()
  modelSwapper = ModelSwapperInterface()

  with MessageBusConnector() as bus:
    if not bus.isMessageQeueuePresent(queueName):
      bus.createMessageQueue(mqName=queueName, durable=True)
    LOGGER.info("Waiting for messages. To exit, press CTRL+C")
    with bus.consume(queueName) as consumer:
      messages = []
      messageRxTimes = []
      while True:
        message = consumer.pollOneMessage()
        if message is not None:
          messages.append(message)
          if gProfiling:
            messageRxTimes.append(time.time())

        if message is None or len(messages) >= MAX_MESSAGES_PER_BATCH:
          if messages:
            # Process the batch
            try:
              _handleBatch(engine,
                           messages,
                           messageRxTimes,
                           metricStreamer,
                           modelSwapper)
            except Exception:  # pylint: disable=W0703
              LOGGER.exception("Unknown failure in processing messages.")
              # Make sure that we ack messages when there is an unexpected error
              # to avoid getting hung forever on one bad record.

            # Ack all the messages
            messages[-1].ack(multiple=True)
            # Clear the message buffer
            messages = []
            messageRxTimes = []
          else:
            # Queue is empty, wait before retrying
            time.sleep(POLL_DELAY_SEC)
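
The consumer loop above accumulates messages until the batch fills up or the queue runs dry, then processes the whole batch and acknowledges it with a single ack(multiple=True); it acknowledges even when _handleBatch raises, so one bad record cannot wedge the consumer forever. A stripped-down sketch of that batch-and-flush control flow, using a plain queue.Queue in place of the message bus (drainBatch and processBatch are illustrative names, not from the original code):

import queue

MAX_MESSAGES_PER_BATCH = 8


def processBatch(batch):
  """Illustrative stand-in for _handleBatch."""
  print("processing %d message(s)" % len(batch))


def drainBatch(q):
  """Accumulate messages; flush when the batch fills or the queue runs dry."""
  batch = []
  while True:
    try:
      message = q.get_nowait()
    except queue.Empty:
      message = None

    if message is not None:
      batch.append(message)

    if message is None or len(batch) >= MAX_MESSAGES_PER_BATCH:
      if batch:
        try:
          processBatch(batch)
        except Exception:
          pass  # a real consumer would log, then still clear/ack the batch
        batch = []  # the batch is acknowledged/cleared whether or not it failed
      else:
        return  # empty queue; the real loop sleeps and keeps polling instead


q = queue.Queue()
for i in range(20):
  q.put("message-%d" % i)
drainBatch(q)
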
Example #3
    def __init__(self):
        # NOTE: the EC2InstanceMetricGetter instance and its process pool must be
        # created BEFORE this main (parent) process creates any global or
        # class-level shared resources that are also used by the pool workers
        # (e.g., boto connections), since replicating such resources into the
        # forked child processes would have undesirable consequences (e.g., the
        # same connection socket file descriptor being used by multiple
        # processes).

        self._metricGetter = EC2InstanceMetricGetter()

        self._log = _getLogger()

        self.metricStreamer = MetricStreamer()
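
The NOTE above is about fork semantics: a worker pool created by fork inherits copies of whatever the parent has already set up, so connections opened before the pool exists would end up sharing socket descriptors across processes. A minimal sketch of the safe ordering, creating the pool first and only then letting the parent open its own resources (the commented-out connection call is purely illustrative):

import multiprocessing


def _work(item):
  # In a real service each worker would open its own connection here
  # rather than inheriting one from the parent.
  return item * 2


def main():
  # Create the pool BEFORE the parent opens sockets or other shared
  # resources, so the forked children do not inherit live descriptors.
  pool = multiprocessing.Pool(processes=2)

  # Only after the fork would the parent open its own connections, e.g.
  # conn = boto.connect_ec2(...)  # hypothetical; shown for ordering only

  try:
    print(pool.map(_work, [1, 2, 3]))
  finally:
    pool.close()
    pool.join()


if __name__ == "__main__":
  main()
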