def runServer(host="0.0.0.0", port=None, protocol=Protocol.PLAIN,
              transport=Transport.TCP):
  Protocol.current = protocol
  if port is None:
    port = Protocol.getDefaultPort(protocol)

  LOGGER.info("Starting with host=%s, port=%s, protocol=%s, transport=%s",
              host, port, protocol, transport)

  # Build the server for the requested transport
  if transport == Transport.UDP:
    server = ThreadedUDPServer((host, port), UDPHandler)
  elif transport == Transport.TCP:
    server = ThreadedTCPServer((host, port), TCPHandler)
  else:
    raise ValueError("Unsupported transport: %r" % (transport,))

  config = Config("application.conf",
                  os.environ["APPLICATION_CONFIG_PATH"])

  global gQueueName
  gQueueName = config.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (config.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))

  # Serve until there is an interrupt
  server.serve_forever()
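
The function above builds the server and then blocks in serve_forever(). For reference, a minimal, hypothetical launcher for it might look like the following sketch; the argparse flags and their names are assumptions for illustration and are not part of the original module.

# Hypothetical CLI launcher for runServer(); flag names are illustrative only.
import argparse

if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Run the metric listener")
  parser.add_argument("--host", default="0.0.0.0")
  parser.add_argument("--port", type=int, default=None)
  args = parser.parse_args()

  # port=None lets runServer() fall back to Protocol.getDefaultPort();
  # protocol and transport keep their PLAIN/TCP defaults from the signature.
  runServer(host=args.host, port=args.port)
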
Code example #3
def runServer():
  # Get the current list of custom metrics
  appConfig = Config("application.conf",
                     os.environ.get("APPLICATION_CONFIG_PATH"))

  engine = repository.engineFactory(appConfig)
  global gCustomMetrics
  now = datetime.datetime.utcnow()

  with engine.connect() as conn:
    gCustomMetrics = dict(
      (m.name, [m, now]) for m in repository.getCustomMetrics(conn))

  queueName = appConfig.get("metric_listener", "queue_name")

  global gProfiling
  gProfiling = (appConfig.getboolean("debugging", "profiling") or
                LOGGER.isEnabledFor(logging.DEBUG))
  del appConfig

  metricStreamer = MetricStreamer()
  modelSwapper = ModelSwapperInterface()

  with MessageBusConnector() as bus:
    if not bus.isMessageQeueuePresent(queueName):
      bus.createMessageQueue(mqName=queueName, durable=True)
    LOGGER.info("Waiting for messages. To exit, press CTRL+C")
    with bus.consume(queueName) as consumer:
      messages = []
      messageRxTimes = []
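      # Buffer messages one at a time; a batch is handed off when the queue
      # drains (pollOneMessage returns None) or MAX_MESSAGES_PER_BATCH is reached.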
      while True:
        message = consumer.pollOneMessage()
        if message is not None:
          messages.append(message)
          if gProfiling:
            messageRxTimes.append(time.time())

        if message is None or len(messages) >= MAX_MESSAGES_PER_BATCH:
          if messages:
            # Process the batch
            try:
              _handleBatch(engine,
                           messages,
                           messageRxTimes,
                           metricStreamer,
                           modelSwapper)
            except Exception:  # pylint: disable=W0703
              LOGGER.exception("Unknown failure in processing messages.")
              # Make sure that we ack messages when there is an unexpected error
              # to avoid getting hung forever on one bad record.

            # Ack all the messages
            messages[-1].ack(multiple=True)
            # Clear the message buffer
            messages = []
            messageRxTimes = []
          else:
            # Queue is empty, wait before retrying
            time.sleep(POLL_DELAY_SEC)
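
The polling loop above runs until the process is interrupted; the log message suggests stopping it with CTRL+C. A minimal, hypothetical entry point that turns the resulting KeyboardInterrupt into a clean shutdown could look like this sketch (the logging.basicConfig call is an assumption, not taken from the original module):

# Hypothetical entry point; lets CTRL+C stop the listener cleanly.
if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO)  # assumption: simple console logging
  try:
    runServer()
  except KeyboardInterrupt:
    LOGGER.info("Interrupted; shutting down metric listener.")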