Example #1
def publish_traces():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config_file", type=argparse.FileType("r"),
        help="path to a configuration file")
    arg_parser.add_argument("--queue-name", default="main",
        help="name of trace queue / publisher config (default: main)")
    arg_parser.add_argument("--debug", default=False, action="store_true",
        help="enable debug logging")
    arg_parser.add_argument("--app-name", default="main", metavar="NAME",
        help="name of app to load from config_file (default: main)")
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    config_parser = configparser.RawConfigParser()
    config_parser.read_file(args.config_file)

    publisher_raw_cfg = dict(config_parser.items("trace-publisher:" + args.queue_name))
    publisher_cfg = config.parse_config(publisher_raw_cfg, {
        "zipkin_api_url": config.Endpoint,
        "post_timeout": config.Optional(config.Integer, POST_TIMEOUT_DEFAULT),
        "max_batch_size": config.Optional(config.Integer, MAX_BATCH_SIZE_DEFAULT),
        "retry_limit": config.Optional(config.Integer, RETRY_LIMIT_DEFAULT),
    })

    trace_queue = MessageQueue(
        "/traces-" + args.queue_name,
        max_messages=MAX_QUEUE_SIZE,
        max_message_size=MAX_SPAN_SIZE,
    )

    # pylint: disable=maybe-no-member
    inner_batch = TraceBatch(max_size=publisher_cfg.max_batch_size)
    batcher = TimeLimitedBatch(inner_batch, MAX_BATCH_AGE)
    metrics_client = metrics_client_from_config(publisher_raw_cfg)
    publisher = ZipkinPublisher(
        publisher_cfg.zipkin_api_url.address,
        metrics_client,
        post_timeout=publisher_cfg.post_timeout,
    )

    while True:
        # Wait briefly for the next span; the short timeout lets the
        # time-limited batch flush on age even when the queue is idle.
        try:
            message = trace_queue.get(timeout=0.2)
        except TimedOutError:
            message = None

        try:
            batcher.add(message)
        except BatchFull:
            # Publish the full batch, then start a fresh one with the
            # span that did not fit.
            serialized = batcher.serialize()
            publisher.publish(serialized)
            batcher.reset()
            batcher.add(message)
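
The loop above is the heart of the publisher: drain a queue, accumulate a batch, flush on overflow. Below is a minimal, self-contained sketch of that pattern with a hypothetical SizeLimitedBatch standing in for baseplate's TraceBatch/TimeLimitedBatch; it is illustrative only, not the library's implementation.

class BatchFull(Exception):
    """Raised when a batch cannot accept another item."""


class SizeLimitedBatch:
    def __init__(self, max_items):
        self.max_items = max_items
        self.items = []

    def add(self, item):
        if item is None:  # a queue timeout produces None; nothing to add
            return
        if len(self.items) >= self.max_items:
            raise BatchFull()
        self.items.append(item)

    def serialize(self):
        return b"\n".join(self.items)

    def reset(self):
        self.items = []


batcher = SizeLimitedBatch(max_items=2)
published = []
for message in [b"span1", b"span2", b"span3"]:
    try:
        batcher.add(message)
    except BatchFull:
        published.append(batcher.serialize())  # flush the full batch
        batcher.reset()
        batcher.add(message)  # keep the item that overflowed
print(published)  # [b'span1\nspan2']
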
Example #2
def make_processor(app_config):  # pragma: nocover
    cfg = config.parse_config(app_config, {
        "activity": {
            "window": config.Timespan,
        },
        "tracing": {
            "endpoint": config.Optional(config.Endpoint),
            "service_name": config.String,
        },
        "redis": {
            "url": config.String,
            "max_connections": config.Optional(config.Integer, default=100),
        },
    })

    metrics_client = metrics_client_from_config(app_config)
    tracing_client = tracing_client_from_config(app_config)
    error_reporter = error_reporter_from_config(app_config, __name__)
    redis_pool = redis.BlockingConnectionPool.from_url(
        cfg.redis.url,
        max_connections=cfg.redis.max_connections,
        timeout=0.1,
    )

    baseplate = Baseplate()
    baseplate.configure_logging()
    baseplate.configure_metrics(metrics_client)
    baseplate.configure_tracing(tracing_client)
    baseplate.configure_error_reporting(error_reporter)
    baseplate.add_to_context("redis", RedisContextFactory(redis_pool))

    counter = ActivityCounter(cfg.activity.window.total_seconds())
    handler = Handler(counter=counter)
    processor = ActivityService.ContextProcessor(handler)
    event_handler = BaseplateProcessorEventHandler(logger, baseplate)
    processor.setEventHandler(event_handler)

    return processor
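
The nested spec above gives dotted attribute access on the result (cfg.redis.url, cfg.activity.window). A rough stdlib-only illustration of that flat-key-to-nested-attribute mapping follows, using a hypothetical nest() helper; this is not baseplate's parser, just the shape of the transformation it performs.

from types import SimpleNamespace


def nest(flat):
    root = {}
    for dotted_key, value in flat.items():
        node = root
        *path, leaf = dotted_key.split(".")
        for part in path:
            node = node.setdefault(part, {})
        node[leaf] = value

    def to_namespace(d):
        return SimpleNamespace(**{
            key: to_namespace(value) if isinstance(value, dict) else value
            for key, value in d.items()
        })

    return to_namespace(root)


cfg = nest({
    "redis.url": "redis://localhost:6379/0",
    "redis.max_connections": "100",
})
print(cfg.redis.url)  # redis://localhost:6379/0
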
Example #3
def make_app(raw_config):
    cfg = config.parse_config(raw_config, CONFIG_SPEC)

    metrics_client = metrics_client_from_config(raw_config)
    error_reporter = error_reporter_from_config(raw_config, __name__)
    secrets = secrets_store_from_config(raw_config)

    dispatcher = MessageDispatcher(metrics=metrics_client)

    source = MessageSource(
        config=cfg.amqp,
    )

    app = SocketServer(
        metrics=metrics_client,
        dispatcher=dispatcher,
        secrets=secrets,
        error_reporter=error_reporter,
        ping_interval=cfg.web.ping_interval,
        admin_auth=cfg.web.admin_auth,
        conn_shed_rate=cfg.web.conn_shed_rate,
    )

    # Register SIGUSR2 to trigger app quiescing; useful when app
    # processes run behind a process manager like einhorn.
    def _handle_quiesce_signal(_, frame):
        app._quiesce({}, bypass_auth=True)

    signal.signal(signal.SIGUSR2, _handle_quiesce_signal)
    signal.siginterrupt(signal.SIGUSR2, False)

    source.message_handler = dispatcher.on_message_received
    app.status_publisher = source.send_message

    gevent.spawn(source.pump_messages)

    return app
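
A self-contained sketch of the SIGUSR2 wiring above: the handler flips a flag (standing in for app._quiesce), and siginterrupt(False) asks the kernel to restart blocking system calls instead of interrupting them when the signal arrives.

import signal
import time

quiescing = False


def _handle_quiesce_signal(signum, frame):
    global quiescing
    quiescing = True


signal.signal(signal.SIGUSR2, _handle_quiesce_signal)
signal.siginterrupt(signal.SIGUSR2, False)

# From another shell: kill -USR2 <pid>; the loop below then exits.
while not quiescing:
    time.sleep(0.5)
print("quiescing: shedding connections before shutdown")
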
Example #4
def publish_events():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config_file",
                            type=argparse.FileType("r"),
                            help="path to a configuration file")
    arg_parser.add_argument(
        "--queue-name",
        default="main",
        help="name of event queue / publisher config (default: main)",
    )
    arg_parser.add_argument("--debug",
                            default=False,
                            action="store_true",
                            help="enable debug logging")
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    config_parser = configparser.RawConfigParser()
    config_parser.read_file(args.config_file)
    raw_config = dict(config_parser.items("event-publisher:" +
                                          args.queue_name))
    cfg = config.parse_config(
        raw_config,
        {
            "collector": {
                "hostname": config.String,
                "version": config.Optional(config.Integer, default=1),
            },
            "key": {
                "name": config.String,
                "secret": config.Base64
            },
        },
    )

    metrics_client = metrics_client_from_config(raw_config)

    event_queue = MessageQueue("/events-" + args.queue_name,
                               max_messages=MAX_QUEUE_SIZE,
                               max_message_size=MAX_EVENT_SIZE)

    # pylint: disable=maybe-no-member
    serializer = SERIALIZER_BY_VERSION[cfg.collector.version]()
    batcher = TimeLimitedBatch(serializer, MAX_BATCH_AGE)
    publisher = BatchPublisher(metrics_client, cfg)

    while True:
        # Poll with a short timeout so the time-limited batch can
        # flush on age even when no events are arriving.
        try:
            message = event_queue.get(timeout=0.2)
        except TimedOutError:
            message = None

        try:
            batcher.add(message)
        except BatchFull:
            # Publish the full batch and re-add the event that
            # did not fit into it.
            serialized = batcher.serialize()
            publisher.publish(serialized)
            batcher.reset()
            batcher.add(message)
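
For reference, a hypothetical configuration matching the spec above, parsed the same way publish_events does. The section name and dotted keys follow the code (assuming baseplate's dotted-key INI convention); the values are invented.

import configparser
import io

SAMPLE_INI = """\
[event-publisher:main]
collector.hostname = collector.example.com
collector.version = 1
key.name = my-signing-key
key.secret = aHVudGVyMg==
"""

config_parser = configparser.RawConfigParser()
config_parser.read_file(io.StringIO(SAMPLE_INI))
raw_config = dict(config_parser.items("event-publisher:main"))
print(raw_config["collector.hostname"])  # collector.example.com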