def main() -> None:
    """Entry point for the secret-fetcher CLI.

    Parses command-line options, loads the ``secret-fetcher`` section of the
    given config file, and then either fetches secrets once (``--once``) or
    loops forever as a daemon, sleeping until shortly before the soonest
    secret/token expiration each cycle.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    cli.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    cli.add_argument(
        "--once",
        default=False,
        action="store_true",
        help="only run the fetcher once rather than as a daemon",
    )
    args = cli.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(message)s", level=log_level)

    ini = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    ini.read_file(args.config_file)
    raw_cfg = dict(ini.items("secret-fetcher"))

    cfg = config.parse_config(
        raw_cfg,
        {
            "vault": {
                "url": config.DefaultFromEnv(config.String, "BASEPLATE_DEFAULT_VAULT_URL"),
                "role": config.String,
                "auth_type": config.Optional(
                    config.OneOf(**VaultClientFactory.auth_types()),
                    default=VaultClientFactory.auth_types()["aws"],
                ),
                "mount_point": config.DefaultFromEnv(
                    config.String, "BASEPLATE_VAULT_MOUNT_POINT", fallback="aws-ec2"
                ),
            },
            "output": {
                "path": config.Optional(config.String, default="/var/local/secrets.json"),
                "owner": config.Optional(config.UnixUser, default=0),
                "group": config.Optional(config.UnixGroup, default=0),
                "mode": config.Optional(config.Integer(base=8), default=0o400),  # type: ignore
            },
            "secrets": config.Optional(config.TupleOf(config.String), default=[]),
            "callback": config.Optional(config.String),
        },
    )

    # pylint: disable=maybe-no-member
    client_factory = VaultClientFactory(
        cfg.vault.url, cfg.vault.role, cfg.vault.auth_type, cfg.vault.mount_point
    )

    if args.once:
        logger.info("Running secret fetcher once")
        fetch_secrets(cfg, client_factory)
        trigger_callback(cfg.callback, cfg.output.path)
        return

    logger.info("Running secret fetcher as a daemon")
    last_proc = None
    while True:
        next_expiration = fetch_secrets(cfg, client_factory)
        last_proc = trigger_callback(cfg.callback, cfg.output.path, last_proc)
        # Wake up VAULT_TOKEN_PREFETCH_TIME before the soonest expiration so
        # there is headroom to refresh; never sleep for less than one second.
        remaining = next_expiration - datetime.datetime.utcnow()
        sleep_for = remaining - VAULT_TOKEN_PREFETCH_TIME
        time.sleep(max(int(sleep_for.total_seconds()), 1))
def publish_traces() -> None:
    """Entry point for the zipkin trace-publisher daemon.

    Reads serialized spans from a POSIX message queue and posts them to a
    zipkin HTTP API endpoint in size- and age-limited batches. Runs forever.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    cli.add_argument(
        "--queue-name",
        default="main",
        help="name of trace queue / publisher config (default: main)",
    )
    cli.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    # NOTE(review): --app-name is accepted but never referenced below —
    # presumably kept for CLI consistency with other publishers; confirm.
    cli.add_argument(
        "--app-name",
        default="main",
        metavar="NAME",
        help="name of app to load from config_file (default: main)",
    )
    args = cli.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)

    ini = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    ini.read_file(args.config_file)
    publisher_raw_cfg = dict(ini.items("trace-publisher:" + args.queue_name))
    publisher_cfg = config.parse_config(
        publisher_raw_cfg,
        {
            "zipkin_api_url": config.DefaultFromEnv(config.Endpoint, "BASEPLATE_ZIPKIN_API_URL"),
            "post_timeout": config.Optional(config.Integer, POST_TIMEOUT_DEFAULT),
            "max_batch_size": config.Optional(config.Integer, MAX_BATCH_SIZE_DEFAULT),
            "retry_limit": config.Optional(config.Integer, RETRY_LIMIT_DEFAULT),
            "max_queue_size": config.Optional(config.Integer, MAX_QUEUE_SIZE),
        },
    )

    # pylint: disable=maybe-no-member
    trace_queue = MessageQueue(
        "/traces-" + args.queue_name,
        max_messages=publisher_cfg.max_queue_size,
        max_message_size=MAX_SPAN_SIZE,
    )
    batcher = TimeLimitedBatch(
        TraceBatch(max_size=publisher_cfg.max_batch_size), MAX_BATCH_AGE
    )
    metrics_client = metrics_client_from_config(publisher_raw_cfg)
    publisher = ZipkinPublisher(
        publisher_cfg.zipkin_api_url.address,
        metrics_client,
        post_timeout=publisher_cfg.post_timeout,
    )

    while True:
        # Poll with a short timeout so the batcher still sees activity (a
        # None message) while the queue is idle.
        span: Optional[bytes]
        try:
            span = trace_queue.get(timeout=0.2)
        except TimedOutError:
            span = None

        try:
            batcher.add(span)
        except BatchFull:
            # Flush the full batch, then start a fresh one with this span.
            publisher.publish(batcher.serialize())
            batcher.reset()
            batcher.add(span)
def test_fallback(self):
    """When the env var is unset and the value is empty, the fallback wins."""
    expected = 5
    loader = config.DefaultFromEnv(config.Integer, "NOT_PROVIDED", expected)
    self.assertEqual(loader(""), expected)
def test_provide_none(self):
    """With no env var, no fallback, and an empty value, parsing raises."""
    loader = config.DefaultFromEnv(config.String, "NOT_PROVIDED")
    with self.assertRaises(ValueError):
        loader("")
def test_use_provided(self):
    """An explicitly provided value takes precedence over the env default.

    Fixed: the env var name was misspelled ("BASEPALTE_DEFAULT_VALUE"), so
    the variable was never found and this test duplicated test_empty_default
    instead of proving that a provided value beats an *existing* env default
    (the same variable test_use_default_from_env relies on).
    """
    parser = config.DefaultFromEnv(config.String, "BASEPLATE_DEFAULT_VALUE")
    self.assertEqual(parser("foo"), "foo")
def test_empty_default(self):
    """A provided value is returned as-is when no env default exists."""
    loader = config.DefaultFromEnv(config.String, "NOT_PROVIDED")
    self.assertEqual(loader("foo"), "foo")
def test_use_default_from_env(self):
    """An empty config value falls back to the environment variable's value."""
    loader = config.DefaultFromEnv(config.String, "BASEPLATE_DEFAULT_VALUE")
    self.assertEqual(loader(""), "default")