def publish_traces() -> None:
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    arg_parser.add_argument(
        "--queue-name",
        default="main",
        help="name of trace queue / publisher config (default: main)",
    )
    arg_parser.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    arg_parser.add_argument(
        "--app-name",
        default="main",
        metavar="NAME",
        help="name of app to load from config_file (default: main)",
    )
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    config_parser = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    config_parser.read_file(args.config_file)

    publisher_raw_cfg = dict(config_parser.items("trace-publisher:" + args.queue_name))
    publisher_cfg = config.parse_config(
        publisher_raw_cfg,
        {
            "zipkin_api_url": config.DefaultFromEnv(config.Endpoint, "BASEPLATE_ZIPKIN_API_URL"),
            "post_timeout": config.Optional(config.Integer, POST_TIMEOUT_DEFAULT),
            "max_batch_size": config.Optional(config.Integer, MAX_BATCH_SIZE_DEFAULT),
            "retry_limit": config.Optional(config.Integer, RETRY_LIMIT_DEFAULT),
            "max_queue_size": config.Optional(config.Integer, MAX_QUEUE_SIZE),
        },
    )

    trace_queue = MessageQueue(
        "/traces-" + args.queue_name,
        max_messages=publisher_cfg.max_queue_size,
        max_message_size=MAX_SPAN_SIZE,
    )

    # pylint: disable=maybe-no-member
    inner_batch = TraceBatch(max_size=publisher_cfg.max_batch_size)
    batcher = TimeLimitedBatch(inner_batch, MAX_BATCH_AGE)
    metrics_client = metrics_client_from_config(publisher_raw_cfg)
    publisher = ZipkinPublisher(
        publisher_cfg.zipkin_api_url.address,
        metrics_client,
        post_timeout=publisher_cfg.post_timeout,
        retry_limit=publisher_cfg.retry_limit,
    )

    while True:
        message: Optional[bytes]

        # Poll with a short timeout so the time-limited batcher still gets a
        # chance to flush when the queue is idle.
        try:
            message = trace_queue.get(timeout=0.2)
        except TimedOutError:
            message = None

        try:
            batcher.add(message)
        except BatchFull:
            # The batch is full (or past its age limit): publish it, then
            # start a fresh batch with the message that did not fit.
            serialized = batcher.serialize()
            publisher.publish(serialized)
            batcher.reset()
            batcher.add(message)
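
# A minimal sketch of the INI stanza publish_traces() reads, assuming the
# default --queue-name of "main". All values below are illustrative
# placeholders; only the key names come from the parse_config spec above.
#
#   [trace-publisher:main]
#   zipkin_api_url = zipkin.local:9411
#   post_timeout = 3
#   max_batch_size = 100
#   retry_limit = 10
#   max_queue_size = 10000
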
def publish_events() -> None:
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    arg_parser.add_argument(
        "--queue-name",
        default="main",
        help="name of event queue / publisher config (default: main)",
    )
    arg_parser.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    config_parser = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    config_parser.read_file(args.config_file)

    raw_config = dict(config_parser.items("event-publisher:" + args.queue_name))
    cfg = config.parse_config(
        raw_config,
        {
            "collector": {
                "hostname": config.String,
                "version": config.Optional(config.String, default="2"),
                "scheme": config.Optional(config.String, default="https"),
            },
            "key": {"name": config.String, "secret": config.Base64},
            "max_queue_size": config.Optional(config.Integer, MAX_QUEUE_SIZE),
        },
    )

    metrics_client = metrics_client_from_config(raw_config)

    event_queue = MessageQueue(
        "/events-" + args.queue_name,
        max_messages=cfg.max_queue_size,
        max_message_size=MAX_EVENT_SIZE,
    )

    # pylint: disable=maybe-no-member
    serializer = SERIALIZER_BY_VERSION[cfg.collector.version]()
    batcher = TimeLimitedBatch(serializer, MAX_BATCH_AGE)
    publisher = BatchPublisher(metrics_client, cfg)

    while True:
        message: Optional[bytes]

        # Poll with a short timeout so age-based flushes happen even when the
        # queue is idle.
        try:
            message = event_queue.get(timeout=0.2)
        except TimedOutError:
            message = None

        try:
            batcher.add(message)
            continue
        except BatchFull:
            pass

        # The batch is full (or past its age limit): publish it, then start a
        # fresh batch with the message that did not fit.
        serialized = batcher.serialize()
        try:
            publisher.publish(serialized)
        except Exception:
            logger.exception("Events publishing failed.")
        batcher.reset()
        batcher.add(message)
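
# A minimal sketch of the corresponding stanza for publish_events(), assuming
# the default --queue-name of "main". Nested spec keys use baseplate's
# dotted-key convention, and key.secret must be base64-encoded because it is
# parsed with config.Base64. All values are illustrative placeholders.
#
#   [event-publisher:main]
#   collector.hostname = events.example.com
#   collector.version = 2
#   collector.scheme = https
#   key.name = ExampleKey
#   key.secret = aHVudGVyMg==
#   max_queue_size = 10000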