def init_sentry_client_from_config(raw_config: config.RawConfig, **kwargs: Any) -> None:
    """Configure the Sentry client.

    This expects one configuration option and can take many optional ones:

    ``sentry.dsn``
        The DSN provided by Sentry. If blank, the reporter will discard
        events.
    ``sentry.environment`` (optional)
        The environment your application is running in.
    ``sentry.sample_rate`` (optional)
        Percentage of errors to report. (e.g. "37%")
    ``sentry.ignore_errors`` (optional)
        A comma-delimited list of exception names, unqualified (e.g.
        ServerTimeout) or fully qualified (e.g.
        baseplate.observers.timeout.ServerTimeout), not to notify Sentry
        about. Note: a minimal list of common exceptions is hard-coded in
        Baseplate; this option only extends that list.

    Example usage::

        init_sentry_client_from_config(app_config)

    :param raw_config: The application configuration which should have
        settings for the error reporter.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "sentry": {
                "dsn": config.Optional(config.String, default=None),
                "environment": config.Optional(config.String, default=None),
                "sample_rate": config.Optional(config.Percent, default=1),
                "ignore_errors": config.Optional(config.TupleOf(config.String), default=()),
            }
        },
    )

    if cfg.sentry.dsn:
        kwargs.setdefault("dsn", cfg.sentry.dsn)

    if cfg.sentry.environment:
        kwargs.setdefault("environment", cfg.sentry.environment)

    kwargs.setdefault("sample_rate", cfg.sentry.sample_rate)

    ignore_errors: List[Union[type, str]] = []
    ignore_errors.extend(ALWAYS_IGNORE_ERRORS)
    ignore_errors.extend(cfg.sentry.ignore_errors)
    kwargs.setdefault("ignore_errors", ignore_errors)

    kwargs.setdefault("with_locals", False)

    client = sentry_sdk.Client(**kwargs)
    sentry_sdk.Hub.current.bind_client(client)
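# A minimal usage sketch for the options documented above; the DSN, environment,
# and error names below are hypothetical placeholders, not values from any real
# deployment.
def _example_sentry_config() -> None:
    app_config = {
        "sentry.dsn": "https://somekey@sentry.local/1",
        "sentry.environment": "staging",
        "sentry.sample_rate": "37%",
        "sentry.ignore_errors": "ServerTimeout, myapp.errors.RetryableError",
    }
    init_sentry_client_from_config(app_config)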
def make_server(server_config: Dict[str, str], listener: socket.socket, app: Any) -> StreamServer:
    # pylint: disable=maybe-no-member
    cfg = config.parse_config(
        server_config,
        {
            "max_concurrency": config.Optional(config.Integer),
            "stop_timeout": config.Optional(
                config.TimespanWithLegacyFallback, default=datetime.timedelta(seconds=10)
            ),
        },
    )

    if cfg.max_concurrency is not None:
        logger.warning(
            "The max_concurrency setting is deprecated for Thrift servers. See https://git.io/Jeywc."
        )

    pool = Pool(size=cfg.max_concurrency)
    server = GeventServer(processor=app, listener=listener, spawn=pool)
    server.stop_timeout = cfg.stop_timeout.total_seconds()

    runtime_monitor.start(server_config, app, pool)
    return server
def metrics_client_from_config(raw_config: config.RawConfig) -> Client:
    """Configure and return a metrics client.

    This expects two configuration options:

    ``metrics.namespace``
        The root key to prefix all metrics in this application with.
    ``metrics.endpoint``
        A ``host:port`` pair, e.g. ``localhost:2014``. If an empty string, a
        client that discards all metrics will be returned.

    :param raw_config: The application configuration which should have
        settings for the metrics client.

    :return: A configured client.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "metrics": {
                "namespace": config.Optional(config.String, default=""),
                "endpoint": config.Optional(config.Endpoint),
            }
        },
    )

    # pylint: disable=maybe-no-member
    return make_client(cfg.metrics.namespace, cfg.metrics.endpoint)
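# A minimal usage sketch; the namespace and endpoint values are hypothetical.
# Leaving "metrics.endpoint" empty would instead yield a client that discards
# all metrics, which is handy in development.
def _example_metrics_config() -> Client:
    app_config = {
        "metrics.namespace": "myservice",
        "metrics.endpoint": "localhost:2014",
    }
    return metrics_client_from_config(app_config)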
def thrift_pool_from_config(
    app_config: config.RawConfig, prefix: str, **kwargs: Any
) -> "ThriftConnectionPool":
    """Make a ThriftConnectionPool from a configuration dictionary.

    The keys useful to :py:func:`thrift_pool_from_config` should be prefixed,
    e.g. ``example_service.endpoint`` etc. The ``prefix`` argument specifies
    the prefix used to filter keys. Each key is mapped to a corresponding
    keyword argument on the :py:class:`ThriftConnectionPool` constructor. Any
    keyword arguments given to this function will also be passed through to
    the constructor. Keyword arguments take precedence over the configuration
    file.

    Supported keys:

    * ``endpoint`` (required): A ``host:port`` pair, e.g. ``localhost:2014``,
      where the Thrift server can be found.
    * ``size``: The size of the connection pool.
    * ``max_age``: The oldest a connection can be before it's recycled and
      replaced with a new one. Written as a
      :py:func:`~baseplate.lib.config.Timespan` e.g. ``1 minute``.
    * ``timeout``: The maximum amount of time a connection attempt or RPC call
      can take before a TimeoutError is raised.
      (:py:func:`~baseplate.lib.config.Timespan`)
    * ``max_connection_attempts``: The maximum number of times the pool will
      attempt to open a connection.

    .. versionchanged:: 1.2
        ``max_retries`` was renamed ``max_connection_attempts``.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "endpoint": config.Endpoint,
            "size": config.Optional(config.Integer, default=10),
            "max_age": config.Optional(config.Timespan, default=config.Timespan("1 minute")),
            "timeout": config.Optional(config.Timespan, default=config.Timespan("1 second")),
            "max_connection_attempts": config.Optional(config.Integer),
            "max_retries": config.Optional(config.Integer),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.size is not None:
        kwargs.setdefault("size", options.size)
    if options.max_age is not None:
        kwargs.setdefault("max_age", options.max_age.total_seconds())
    if options.timeout is not None:
        kwargs.setdefault("timeout", options.timeout.total_seconds())
    if options.max_connection_attempts is not None:
        kwargs.setdefault("max_connection_attempts", options.max_connection_attempts)
    if options.max_retries is not None:
        raise Exception("max_retries was renamed to max_connection_attempts")

    return ThriftConnectionPool(endpoint=options.endpoint, **kwargs)
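# A minimal usage sketch; "example_service." and the values below are
# hypothetical illustrations of the supported keys. Timespans are written out
# in words in the config and converted to seconds internally.
def _example_thrift_pool_config() -> "ThriftConnectionPool":
    app_config = {
        "example_service.endpoint": "localhost:9090",
        "example_service.size": "25",
        "example_service.max_age": "5 minutes",
        "example_service.timeout": "500 milliseconds",
    }
    return thrift_pool_from_config(app_config, prefix="example_service.")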
def make_server(server_config: Dict[str, str], listener: socket.socket, app: Any) -> StreamServer:
    """Make a gevent server for WSGI apps."""
    # pylint: disable=maybe-no-member
    cfg = config.parse_config(
        server_config,
        {
            "handler": config.Optional(config.String, default=None),
            "max_concurrency": config.Integer,
            "stop_timeout": config.Optional(config.Integer, default=0),
        },
    )

    pool = Pool(size=cfg.max_concurrency)
    log = LoggingLogAdapter(logger, level=logging.DEBUG)

    kwargs: Dict[str, Any] = {}
    if cfg.handler:
        kwargs["handler_class"] = _load_factory(cfg.handler, default_name=None)

    server = WSGIServer(
        listener,
        application=app,
        spawn=pool,
        log=log,
        error_log=LoggingLogAdapter(logger, level=logging.ERROR),
        **kwargs,
    )
    server.stop_timeout = cfg.stop_timeout

    runtime_monitor.start(server_config, app, pool)
    return server
def secrets_store_from_config(
    app_config: config.RawConfig, timeout: Optional[int] = None, prefix: str = "secrets."
) -> SecretsStore:
    """Configure and return a secrets store.

    The keys useful to :py:func:`secrets_store_from_config` should be
    prefixed, e.g. ``secrets.url``, etc.

    Supported keys:

    ``path``: the path to the secrets file generated by the secrets fetcher
        daemon.
    ``provider``: the secrets provider. Acceptable values are ``vault`` and
        ``vault_csi``. Defaults to ``vault``.
    ``backoff``: retry backoff time for the secrets file watcher. Defaults to
        None, which is mapped to DEFAULT_FILEWATCHER_BACKOFF.

    :param app_config: The application configuration which should have
        settings for the secrets store.
    :param timeout: How long, in seconds, to block instantiation waiting for
        the secrets data to become available (defaults to not blocking).
    :param prefix: Specifies the prefix used to filter keys. Defaults to
        "secrets."

    """
    parser: SecretParser
    assert prefix.endswith(".")
    config_prefix = prefix[:-1]

    cfg = config.parse_config(
        app_config,
        {
            config_prefix: {
                "path": config.Optional(config.String, default="/var/local/secrets.json"),
                "provider": config.Optional(config.String, default="vault"),
                "backoff": config.Optional(config.Timespan),
            }
        },
    )
    options = getattr(cfg, config_prefix)

    if options.backoff:
        backoff = options.backoff.total_seconds()
    else:
        backoff = None

    if options.provider == "vault_csi":
        parser = parse_vault_csi
        return DirectorySecretsStore(options.path, parser, timeout=timeout, backoff=backoff)

    return SecretsStore(options.path, timeout=timeout, backoff=backoff, parser=parse_secrets_fetcher)
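# A minimal usage sketch; the path and backoff values are hypothetical. With
# provider left at its default of "vault", this returns a SecretsStore that
# reads the single file written by the secrets fetcher daemon.
def _example_secrets_store_config() -> SecretsStore:
    app_config = {
        "secrets.path": "/var/local/secrets.json",
        "secrets.backoff": "1 second",
    }
    return secrets_store_from_config(app_config, timeout=5)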
def engine_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "database.",
    **kwargs: Any,
) -> Engine:
    """Make an :py:class:`~sqlalchemy.engine.Engine` from a configuration dictionary.

    The keys useful to :py:func:`engine_from_config` should be prefixed, e.g.
    ``database.url``, etc. The ``prefix`` argument specifies the prefix used
    to filter keys.

    Supported keys:

    * ``url``: the connection URL to the database, passed to
      :py:func:`~sqlalchemy.engine.url.make_url` to create the
      :py:class:`~sqlalchemy.engine.url.URL` used to connect to the database.
    * ``credentials_secret`` (optional): the key used to retrieve the database
      credentials from ``secrets`` as a
      :py:class:`~baseplate.lib.secrets.CredentialSecret`. If this is
      supplied, any credentials given in ``url`` will be replaced by these.
    * ``pool_recycle`` (optional): this setting causes the pool to recycle
      connections after the given number of seconds has passed. It defaults to
      -1, or no timeout.
    * ``pool_pre_ping`` (optional): when set to true, this setting causes
      sqlalchemy to perform a liveness-check query each time a connection is
      checked out of the pool. If the liveness-check fails, the connection is
      gracefully recycled. This ensures severed connections are handled more
      gracefully, at the cost of doing a `SELECT 1` at the start of each
      checkout. When used, this obviates most of the reasons you might use
      pool_recycle, and as such they shouldn't normally be used
      simultaneously. Requires SQLAlchemy 1.3.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "credentials_secret": config.Optional(config.String),
            "pool_recycle": config.Optional(config.Integer),
            "pool_pre_ping": config.Optional(config.Boolean),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    url = make_url(options.url)

    if options.pool_recycle is not None:
        kwargs.setdefault("pool_recycle", options.pool_recycle)

    if options.pool_pre_ping is not None:
        kwargs.setdefault("pool_pre_ping", options.pool_pre_ping)

    if options.credentials_secret:
        if not secrets:
            raise TypeError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        url.username = credentials.username
        url.password = credentials.password

    return create_engine(url, **kwargs)
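# A minimal usage sketch; the URL and secret key are hypothetical. Because
# credentials_secret is set, a secrets store must be passed in, and the
# username/password from the secret replace any credentials embedded in the
# URL.
def _example_engine_config(secrets: SecretsStore) -> Engine:
    app_config = {
        "database.url": "postgresql://pg.local:5432/mydb",
        "database.credentials_secret": "secret/myservice/db-credentials",
        "database.pool_recycle": "300",
        "database.pool_pre_ping": "true",
    }
    return engine_from_config(app_config, secrets=secrets)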
def experiments_client_from_config(
    app_config: config.RawConfig, event_logger: EventLogger, prefix: str = "experiments."
) -> ExperimentsContextFactory:
    """Configure and return an :py:class:`ExperimentsContextFactory` object.

    The keys useful to :py:func:`experiments_client_from_config` should be
    prefixed, e.g. ``experiments.path``, etc.

    Supported keys:

    ``path``: the path to the experiment configuration file generated by the
        experiment configuration fetcher daemon.
    ``timeout`` (optional): the time that we should wait for the file
        specified by ``path`` to exist. Defaults to `None`, which is treated
        as infinite.
    ``backoff`` (optional): retry backoff time for the experiments file
        watcher. Defaults to None, which is mapped to
        DEFAULT_FILEWATCHER_BACKOFF.

    :param app_config: The application configuration which should have
        settings for the experiments client.
    :param event_logger: The EventLogger to be used to log bucketing events.
    :param prefix: the prefix used to filter keys (defaults to
        "experiments.").

    """
    assert prefix.endswith(".")
    config_prefix = prefix[:-1]
    cfg = config.parse_config(
        app_config,
        {
            config_prefix: {
                "path": config.Optional(config.String, default="/var/local/experiments.json"),
                "timeout": config.Optional(config.Timespan),
                "backoff": config.Optional(config.Timespan),
            }
        },
    )
    options = getattr(cfg, config_prefix)

    # pylint: disable=maybe-no-member
    if options.timeout:
        timeout = options.timeout.total_seconds()
    else:
        timeout = None

    if options.backoff:
        backoff = options.backoff.total_seconds()
    else:
        backoff = None

    return ExperimentsContextFactory(options.path, event_logger, timeout=timeout, backoff=backoff)
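# A minimal usage sketch; the path and timeout values are hypothetical, and
# event_logger is whatever EventLogger implementation the application already
# uses for bucketing events.
def _example_experiments_config(event_logger: EventLogger) -> ExperimentsContextFactory:
    app_config = {
        "experiments.path": "/var/local/experiments.json",
        "experiments.timeout": "30 seconds",
    }
    return experiments_client_from_config(app_config, event_logger)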
def cluster_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "cassandra.",
    execution_profiles: Optional[Dict[str, ExecutionProfile]] = None,
    **kwargs: Any,
) -> Cluster:
    """Make a Cluster from a configuration dictionary.

    The keys useful to :py:func:`cluster_from_config` should be prefixed, e.g.
    ``cassandra.contact_points`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys. Each key is mapped to a corresponding keyword
    argument on the :py:class:`~cassandra.cluster.Cluster` constructor. Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~cassandra.cluster.Cluster` constructor. Keyword arguments take
    precedence over the configuration file.

    Supported keys:

    * ``contact_points`` (required): comma-delimited list of contact points to
      try connecting to for cluster discovery
    * ``port``: The server-side port to open connections to.
    * ``credentials_secret`` (optional): the key used to retrieve the database
      credentials from ``secrets`` as a
      :py:class:`~baseplate.lib.secrets.CredentialSecret`.

    :param execution_profiles: Configured execution profiles to provide to the
        rest of the application.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "contact_points": config.TupleOf(config.String),
            "port": config.Optional(config.Integer, default=None),
            "credentials_secret": config.Optional(config.String),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.port:
        kwargs.setdefault("port", options.port)

    if options.credentials_secret:
        if not secrets:
            raise TypeError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault(
            "auth_provider",
            PlainTextAuthProvider(username=credentials.username, password=credentials.password),
        )

    return Cluster(options.contact_points, execution_profiles=execution_profiles, **kwargs)
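# A minimal usage sketch; the contact points, port, and secret key are
# hypothetical. Note that contact_points is comma-delimited in the config but
# arrives at the Cluster constructor as a tuple of strings.
def _example_cassandra_config(secrets: SecretsStore) -> Cluster:
    app_config = {
        "cassandra.contact_points": "10.0.0.1, 10.0.0.2",
        "cassandra.port": "9042",
        "cassandra.credentials_secret": "secret/myservice/cassandra",
    }
    return cluster_from_config(app_config, secrets=secrets)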
def main() -> NoReturn:
    arg_parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
    arg_parser.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    arg_parser.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(level=level, format="%(message)s")

    # quiet kazoo's verbose logs a bit
    logging.getLogger("kazoo").setLevel(logging.WARNING)

    parser = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    parser.read_file(args.config_file)
    watcher_config = dict(parser.items("live-data"))

    cfg = config.parse_config(
        watcher_config,
        {
            "nodes": config.DictOf(
                {
                    "source": config.String,
                    "dest": config.String,
                    "owner": config.Optional(config.UnixUser),
                    "group": config.Optional(config.UnixGroup),
                    "mode": config.Optional(config.Integer(base=8), default=0o400),  # type: ignore
                }
            )
        },
    )
    # pylint: disable=maybe-no-member
    nodes = cfg.nodes.values()

    secrets = secrets_store_from_config(watcher_config, timeout=30)
    zookeeper = zookeeper_client_from_config(secrets, watcher_config, read_only=True)
    zookeeper.start()
    try:
        watch_zookeeper_nodes(zookeeper, nodes)
    finally:
        zookeeper.stop()
def engine_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "database.",
    **kwargs: Any,
) -> Engine:
    """Make an :py:class:`~sqlalchemy.engine.Engine` from a configuration dictionary.

    The keys useful to :py:func:`engine_from_config` should be prefixed, e.g.
    ``database.url``, etc. The ``prefix`` argument specifies the prefix used
    to filter keys.

    Supported keys:

    * ``url``: the connection URL to the database, passed to
      :py:func:`~sqlalchemy.engine.url.make_url` to create the
      :py:class:`~sqlalchemy.engine.url.URL` used to connect to the database.
    * ``credentials_secret`` (optional): the key used to retrieve the database
      credentials from ``secrets`` as a
      :py:class:`~baseplate.lib.secrets.CredentialSecret`. If this is
      supplied, any credentials given in ``url`` will be replaced by these.
    * ``pool_recycle`` (optional): this setting causes the pool to recycle
      connections after the given number of seconds has passed. It defaults to
      -1, or no timeout.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "credentials_secret": config.Optional(config.String),
            "pool_recycle": config.Optional(config.Integer),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    url = make_url(options.url)

    if options.pool_recycle is not None:
        kwargs.setdefault("pool_recycle", options.pool_recycle)

    if options.credentials_secret:
        if not secrets:
            raise TypeError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        url.username = credentials.username
        url.password = credentials.password

    return create_engine(url, **kwargs)
def exchange_from_config(app_config: config.RawConfig, prefix: str, **kwargs: Any) -> Exchange:
    """Make an Exchange from a configuration dictionary.

    The keys useful to :py:func:`exchange_from_config` should be prefixed,
    e.g. ``amqp.exchange_name`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys. Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.Exchange` constructor. Any keyword
    arguments given to this function will be passed through to the
    :py:class:`~kombu.Exchange` constructor. Keyword arguments take precedence
    over the configuration file.

    Supported keys:

    * ``exchange_name``
    * ``exchange_type``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "exchange_name": config.Optional(config.String),
            "exchange_type": config.String,
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    return Exchange(name=options.exchange_name or "", type=options.exchange_type, **kwargs)
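# A minimal usage sketch; the exchange name and type are hypothetical. An
# unset exchange_name falls back to "" (the AMQP default exchange).
def _example_exchange_config() -> Exchange:
    app_config = {
        "amqp.exchange_name": "events",
        "amqp.exchange_type": "topic",
    }
    return exchange_from_config(app_config, prefix="amqp.")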
def connection_from_config(app_config: config.RawConfig, prefix: str, **kwargs: Any) -> Connection:
    """Make a Connection from a configuration dictionary.

    The keys useful to :py:func:`connection_from_config` should be prefixed,
    e.g. ``amqp.hostname`` etc. The ``prefix`` argument specifies the prefix
    used to filter keys. Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.connection.Connection` constructor. Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~kombu.connection.Connection` constructor. Keyword arguments
    take precedence over the configuration file.

    Supported keys:

    * ``hostname``
    * ``virtual_host``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "hostname": config.String,
            "virtual_host": config.Optional(config.String),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    return Connection(hostname=options.hostname, virtual_host=options.virtual_host, **kwargs)
def hvac_factory_from_config(
    app_config: config.RawConfig, secrets_store: SecretsStore, prefix: str = "vault."
) -> "HvacContextFactory":
    """Make an HVAC client factory from a configuration dictionary.

    The keys useful to :py:func:`hvac_factory_from_config` should be prefixed,
    e.g. ``vault.timeout``. The ``prefix`` argument specifies the prefix used
    to filter keys.

    Supported keys:

    * ``timeout``: How long to wait for calls to Vault.
      (:py:func:`~baseplate.lib.config.Timespan`)

    :param app_config: The raw application configuration.
    :param secrets_store: A configured secrets store from which we can get a
        Vault authentication token.
    :param prefix: The prefix for configuration keys.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {"timeout": config.Optional(config.Timespan, default=datetime.timedelta(seconds=1))}
    )
    options = parser.parse(prefix[:-1], app_config)

    return HvacContextFactory(secrets_store, options.timeout)
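# A minimal usage sketch; the timeout value is hypothetical, and secrets_store
# is assumed to come from secrets_store_from_config elsewhere in the
# application.
def _example_hvac_config(secrets_store: SecretsStore) -> "HvacContextFactory":
    app_config = {"vault.timeout": "500 milliseconds"}
    return hvac_factory_from_config(app_config, secrets_store)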
def metrics_client_from_config(raw_config: config.RawConfig) -> Client:
    """Configure and return a metrics client.

    This expects two configuration options:

    ``metrics.namespace``
        The root key to prefix all metrics in this application with.
    ``metrics.endpoint``
        A ``host:port`` pair, e.g. ``localhost:2014``. If an empty string, a
        client that discards all metrics will be returned.
    ``metrics.log_if_unconfigured``
        Whether to log metrics when there is no configured endpoint. Defaults
        to false.
    ``metrics.swallow_network_errors``
        When false, network errors while sending to the metrics collector will
        cause an exception to be thrown. When true, those exceptions are
        logged and swallowed instead. Defaults to false.

    :param raw_config: The application configuration which should have
        settings for the metrics client.

    :return: A configured client.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "metrics": {
                "namespace": config.Optional(config.String, default=""),
                "endpoint": config.Optional(config.Endpoint),
                "log_if_unconfigured": config.Optional(config.Boolean, default=False),
                "swallow_network_errors": config.Optional(config.Boolean, default=False),
            }
        },
    )

    # pylint: disable=maybe-no-member
    return make_client(
        namespace=cfg.metrics.namespace,
        endpoint=cfg.metrics.endpoint,
        log_if_unconfigured=cfg.metrics.log_if_unconfigured,
        swallow_network_errors=cfg.metrics.swallow_network_errors,
    )
def from_config(cls, app_config: config.RawConfig) -> "TimeoutBaseplateObserver":
    cfg = config.parse_config(
        app_config,
        {
            "server_timeout": {
                "default": config.Optional(
                    config.Timespan, default=datetime.timedelta(seconds=10)
                ),
                "debug": config.Optional(config.Boolean, default=False),
                "by_endpoint": config.DictOf(config.Timespan),
            }
        },
    )
    return cls(cfg.server_timeout)
def from_config_and_client(
    cls, raw_config: config.RawConfig, client: metrics.Client
) -> "MetricsBaseplateObserver":
    cfg = config.parse_config(
        raw_config,
        {"metrics_observer": {"sample_rate": config.Optional(config.Percent, default=1.0)}},
    )
    return cls(client, sample_rate=cfg.metrics_observer.sample_rate)
def from_config_and_client(
    cls, raw_config: config.RawConfig, client: metrics.Client
) -> "TaggedMetricsBaseplateObserver":
    cfg = config.parse_config(
        raw_config,
        {
            "metrics": {
                "allowlist": config.Optional(config.TupleOf(config.String), default=[]),
            },
            "metrics_observer": {"sample_rate": config.Optional(config.Percent, default=1.0)},
        },
    )
    return cls(
        client,
        allowlist=set(cfg.metrics.allowlist) | {"client", "endpoint"},
        sample_rate=cfg.metrics_observer.sample_rate,
    )
def pool_from_config(
    app_config: config.RawConfig, prefix: str = "redis.", **kwargs: Any
) -> redis.ConnectionPool:
    """Make a ConnectionPool from a configuration dictionary.

    The keys useful to :py:func:`pool_from_config` should be prefixed, e.g.
    ``redis.url``, ``redis.max_connections``, etc. The ``prefix`` argument
    specifies the prefix used to filter keys. Each key is mapped to a
    corresponding keyword argument on the :py:class:`redis.ConnectionPool`
    constructor.

    Supported keys:

    * ``url`` (required): a URL like ``redis://localhost/0``.
    * ``max_connections``: an integer maximum number of connections in the
      pool
    * ``socket_connect_timeout``: how long to wait for sockets to connect.
      e.g. ``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)
    * ``socket_timeout``: how long to wait for socket operations, e.g.
      ``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "max_connections": config.Optional(config.Integer, default=None),
            "socket_connect_timeout": config.Optional(config.Timespan, default=None),
            "socket_timeout": config.Optional(config.Timespan, default=None),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.max_connections is not None:
        kwargs.setdefault("max_connections", options.max_connections)
    if options.socket_connect_timeout is not None:
        kwargs.setdefault("socket_connect_timeout", options.socket_connect_timeout.total_seconds())
    if options.socket_timeout is not None:
        kwargs.setdefault("socket_timeout", options.socket_timeout.total_seconds())

    return redis.BlockingConnectionPool.from_url(options.url, **kwargs)
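# A minimal usage sketch; the URL and limits are hypothetical. Timespan values
# in the config are converted to float seconds before reaching the redis
# connection pool.
def _example_redis_pool_config() -> redis.ConnectionPool:
    app_config = {
        "redis.url": "redis://localhost/0",
        "redis.max_connections": "50",
        "redis.socket_timeout": "200 milliseconds",
    }
    return pool_from_config(app_config)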
def from_config(cls, app_config: config.RawConfig) -> "TimeoutBaseplateObserver":
    cfg = config.parse_config(
        app_config,
        {
            "server_timeout": {
                "default": config.Optional(
                    config.TimespanOrInfinite,
                    default=config.InfiniteTimespan,
                ),
                "debug": config.Optional(config.Boolean, default=False),
                "by_endpoint": config.DictOf(config.TimespanOrInfinite),
            }
        },
    )
    return cls(cfg.server_timeout)
def connection_from_config(
    app_config: config.RawConfig,
    prefix: str,
    secrets: Optional[SecretsStore] = None,
    **kwargs: Any,
) -> Connection:
    """Make a Connection from a configuration dictionary.

    The keys useful to :py:func:`connection_from_config` should be prefixed,
    e.g. ``amqp.hostname`` etc. The ``prefix`` argument specifies the prefix
    used to filter keys. Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.connection.Connection` constructor. Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~kombu.connection.Connection` constructor. Keyword arguments
    take precedence over the configuration file.

    Supported keys:

    * ``credentials_secret``
    * ``hostname``
    * ``virtual_host``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "credentials_secret": config.Optional(config.String),
            "hostname": config.String,
            "virtual_host": config.Optional(config.String),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.credentials_secret:
        if not secrets:
            raise ValueError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault("userid", credentials.username)
        kwargs.setdefault("password", credentials.password)

    return Connection(hostname=options.hostname, virtual_host=options.virtual_host, **kwargs)
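# A minimal usage sketch; the hostname, virtual host, and secret key are
# hypothetical. With credentials_secret set, the username and password are
# pulled from the secrets store rather than from the configuration file.
def _example_amqp_connection_config(secrets: SecretsStore) -> Connection:
    app_config = {
        "amqp.hostname": "rabbit.local",
        "amqp.virtual_host": "/",
        "amqp.credentials_secret": "secret/myservice/rabbitmq",
    }
    return connection_from_config(app_config, prefix="amqp.", secrets=secrets)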
def make_server(server_config: Dict[str, str], listener: socket.socket, app: Any) -> StreamServer:
    """Make a gevent server for WSGI apps."""
    # pylint: disable=maybe-no-member
    cfg = config.parse_config(
        server_config,
        {
            "handler": config.Optional(config.String, default=None),
            "max_concurrency": config.Optional(config.Integer),
            "stop_timeout": config.Optional(
                config.TimespanWithLegacyFallback, default=datetime.timedelta(seconds=10)
            ),
        },
    )

    if cfg.max_concurrency is not None:
        raise Exception(
            "The max_concurrency setting is not allowed for WSGI servers. See https://git.io/Jeywc."
        )

    pool = Pool()
    log = LoggingLogAdapter(logger, level=logging.DEBUG)

    kwargs: Dict[str, Any] = {}
    if cfg.handler:
        kwargs["handler_class"] = _load_factory(cfg.handler, default_name=None)

    server = WSGIServer(
        listener,
        application=app,
        spawn=pool,
        log=log,
        error_log=LoggingLogAdapter(logger, level=logging.ERROR),
        **kwargs,
    )
    server.stop_timeout = cfg.stop_timeout.total_seconds()

    runtime_monitor.start(server_config, app, pool)
    return server
def test_simple_config(self):
    result = config.parse_config(
        self.config,
        {
            "simple": config.String,
            "foo": {"bar": config.Integer},
            "noo": {"bar": config.Optional(config.String, default="")},
            "deep": {"so": {"deep": config.String}},
        },
    )

    self.assertEqual(result.simple, "oink")
    self.assertEqual(result.foo.bar, 33)
    self.assertEqual(result.noo.bar, "")
    self.assertEqual(result.deep.so.deep, "very")
def from_config(cls, app_config: config.RawConfig) -> "TimeoutBaseplateObserver":
    cfg = config.parse_config(
        app_config,
        {
            "server_timeout": {
                "default": config.Optional(config.TimespanOrInfinite, default=None),
                "debug": config.Optional(config.Boolean, default=False),
                "by_endpoint": config.DictOf(config.TimespanOrInfinite),
            }
        },
    )

    if cfg.server_timeout.default is None:
        warn_deprecated(
            "No server_timeout.default configured. Defaulting to no timeout. "
            "Set the default timeout to 'infinite' or a timespan like '2 seconds'. "
            "This will become mandatory in Baseplate.py 2.0."
        )
        cfg.server_timeout.default = config.InfiniteTimespan

    return cls(cfg.server_timeout)
def make_server(server_config: Dict[str, str], listener: socket.socket, app: Any) -> StreamServer:
    # pylint: disable=maybe-no-member
    cfg = config.parse_config(
        server_config,
        {
            "max_concurrency": config.Integer,
            "stop_timeout": config.Optional(config.Integer, default=0),
        },
    )

    pool = Pool(size=cfg.max_concurrency)
    server = GeventServer(processor=app, listener=listener, spawn=pool)
    server.stop_timeout = cfg.stop_timeout

    runtime_monitor.start(server_config, app, pool)
    return server
def make_server(server_config: Dict[str, str], listener: socket.socket, app: Any) -> StreamServer:
    # pylint: disable=maybe-no-member
    cfg = config.parse_config(
        server_config,
        {
            "max_concurrency": config.Integer,
            "stop_timeout": config.Optional(
                config.TimespanWithLegacyFallback, default=datetime.timedelta(seconds=10)
            ),
        },
    )

    pool = Pool(size=cfg.max_concurrency)
    server = GeventServer(processor=app, listener=listener, spawn=pool)
    server.stop_timeout = cfg.stop_timeout.total_seconds()

    runtime_monitor.start(server_config, app, pool)
    return server
def secrets_store_from_config(
    app_config: config.RawConfig, timeout: Optional[int] = None, prefix: str = "secrets."
) -> SecretsStore:
    """Configure and return a secrets store.

    The keys useful to :py:func:`secrets_store_from_config` should be
    prefixed, e.g. ``secrets.url``, etc.

    Supported keys:

    ``path``: the path to the secrets file generated by the secrets fetcher
        daemon.

    :param app_config: The application configuration which should have
        settings for the secrets store.
    :param timeout: How long, in seconds, to block instantiation waiting for
        the secrets data to become available (defaults to not blocking).
    :param prefix: Specifies the prefix used to filter keys. Defaults to
        "secrets."

    """
    assert prefix.endswith(".")
    config_prefix = prefix[:-1]
    cfg = config.parse_config(
        app_config,
        {
            config_prefix: {
                "path": config.Optional(config.String, default="/var/local/secrets.json")
            }
        },
    )
    options = getattr(cfg, config_prefix)

    # pylint: disable=maybe-no-member
    return SecretsStore(options.path, timeout=timeout)
def publish_traces() -> None:
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    arg_parser.add_argument(
        "--queue-name",
        default="main",
        help="name of trace queue / publisher config (default: main)",
    )
    arg_parser.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    arg_parser.add_argument(
        "--app-name",
        default="main",
        metavar="NAME",
        help="name of app to load from config_file (default: main)",
    )
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    config_parser = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    config_parser.read_file(args.config_file)
    publisher_raw_cfg = dict(config_parser.items("trace-publisher:" + args.queue_name))
    publisher_cfg = config.parse_config(
        publisher_raw_cfg,
        {
            "zipkin_api_url": config.DefaultFromEnv(config.Endpoint, "BASEPLATE_ZIPKIN_API_URL"),
            "post_timeout": config.Optional(config.Integer, POST_TIMEOUT_DEFAULT),
            "max_batch_size": config.Optional(config.Integer, MAX_BATCH_SIZE_DEFAULT),
            "retry_limit": config.Optional(config.Integer, RETRY_LIMIT_DEFAULT),
            "max_queue_size": config.Optional(config.Integer, MAX_QUEUE_SIZE),
        },
    )

    trace_queue = MessageQueue(
        "/traces-" + args.queue_name,
        max_messages=publisher_cfg.max_queue_size,
        max_message_size=MAX_SPAN_SIZE,
    )

    # pylint: disable=maybe-no-member
    inner_batch = TraceBatch(max_size=publisher_cfg.max_batch_size)
    batcher = TimeLimitedBatch(inner_batch, MAX_BATCH_AGE)
    metrics_client = metrics_client_from_config(publisher_raw_cfg)
    publisher = ZipkinPublisher(
        publisher_cfg.zipkin_api_url.address,
        metrics_client,
        post_timeout=publisher_cfg.post_timeout,
    )

    while True:
        message: Optional[bytes]
        try:
            message = trace_queue.get(timeout=0.2)
        except TimedOutError:
            message = None

        try:
            batcher.add(message)
        except BatchFull:
            serialized = batcher.serialize()
            publisher.publish(serialized)
            batcher.reset()
            batcher.add(message)
def http_adapter_from_config(
    app_config: config.RawConfig, prefix: str, **kwargs: Any
) -> HTTPAdapter:
    """Make an HTTPAdapter from a configuration dictionary.

    The keys useful to :py:func:`http_adapter_from_config` should be prefixed,
    e.g. ``http.pool_connections``, ``http.max_retries``, etc. The ``prefix``
    argument specifies the prefix used to filter keys. Each key is mapped to a
    corresponding keyword argument on the
    :py:class:`~requests.adapters.HTTPAdapter` constructor.

    Supported keys:

    * ``pool_connections``: The number of connections to cache (default: 10).
    * ``pool_maxsize``: The maximum number of connections to keep in the pool
      (default: 10).
    * ``max_retries``: How many times to retry DNS lookups or connection
      attempts, but never sending data (default: 0).
    * ``pool_block``: Whether the connection pool will block when trying to
      get a connection (default: false).

    Additionally, the rules for Advocate's address filtering can be configured
    with the ``filter`` sub-keys:

    * ``filter.ip_allowlist``: A comma-delimited list of IP addresses
      (1.2.3.4) or CIDR-notation (1.2.3.0/24) ranges that the client can
      always connect to (default: anything not on the local network).
    * ``filter.ip_denylist``: A comma-delimited list of IP addresses or
      CIDR-notation ranges the client may never connect to (default: the local
      network).
    * ``filter.port_allowlist``: A comma-delimited list of TCP port numbers
      that the client can connect to (default: 80, 8080, 443, 8443, 8000).
    * ``filter.port_denylist``: A comma-delimited list of TCP port numbers
      that the client may never connect to (default: none).
    * ``filter.hostname_denylist``: A comma-delimited list of hostnames that
      the client may never connect to (default: none).
    * ``filter.allow_ipv6``: Should the client be allowed to connect to IPv6
      hosts? (default: false; note: it is tricky to apply filtering rules
      comprehensively to IPv6.)

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "pool_connections": config.Optional(config.Integer, default=10),
            "pool_maxsize": config.Optional(config.Integer, default=10),
            "max_retries": config.Optional(config.Integer, default=0),
            "pool_block": config.Optional(config.Boolean, default=False),
            "filter": {
                "ip_allowlist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "ip_denylist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "port_allowlist": config.Optional(config.TupleOf(int)),
                "port_denylist": config.Optional(config.TupleOf(int)),
                "hostname_denylist": config.Optional(config.TupleOf(config.String)),
                "allow_ipv6": config.Optional(config.Boolean, default=False),
            },
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.pool_connections is not None:
        kwargs.setdefault("pool_connections", options.pool_connections)
    if options.pool_maxsize is not None:
        kwargs.setdefault("pool_maxsize", options.pool_maxsize)
    if options.max_retries is not None:
        kwargs.setdefault("max_retries", options.max_retries)
    if options.pool_block is not None:
        kwargs.setdefault("pool_block", options.pool_block)

    kwargs.setdefault(
        "validator",
        AddrValidator(
            ip_whitelist=options.filter.ip_allowlist,
            ip_blacklist=options.filter.ip_denylist,
            port_whitelist=options.filter.port_allowlist,
            port_blacklist=options.filter.port_denylist,
            hostname_blacklist=options.filter.hostname_denylist,
            allow_ipv6=options.filter.allow_ipv6,
        ),
    )

    return ValidatingHTTPAdapter(**kwargs)
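# A minimal usage sketch; the prefix and filter values are hypothetical. Keys
# that are left out fall back to the defaults documented above, and the
# Advocate validator is built from the filter.* sub-keys.
def _example_http_adapter_config() -> HTTPAdapter:
    app_config = {
        "http.pool_connections": "10",
        "http.max_retries": "2",
        "http.filter.port_allowlist": "80, 443",
        "http.filter.allow_ipv6": "false",
    }
    return http_adapter_from_config(app_config, prefix="http.")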
def error_reporter_from_config(raw_config: config.RawConfig, module_name: str) -> raven.Client:
    """Configure and return an error reporter.

    This expects one configuration option and can take many optional ones:

    ``sentry.dsn``
        The DSN provided by Sentry. If blank, the reporter will discard
        events.
    ``sentry.site`` (optional)
        An arbitrary string to identify this client installation.
    ``sentry.environment`` (optional)
        The environment your application is running in.
    ``sentry.exclude_paths`` (optional)
        Comma-delimited list of module prefixes to ignore when discovering
        where an error came from.
    ``sentry.include_paths`` (optional)
        Comma-delimited list of paths to include for consideration when
        drilling down to an exception.
    ``sentry.ignore_exceptions`` (optional)
        Comma-delimited list of fully qualified names of exception classes
        (potentially with * globs) to not report.
    ``sentry.sample_rate`` (optional)
        Percentage of errors to report. (e.g. "37%")
    ``sentry.processors`` (optional)
        Comma-delimited list of fully qualified names of processor classes to
        apply to events before sending to Sentry.

    Example usage::

        error_reporter_from_config(app_config, __name__)

    :param raw_config: The application configuration which should have
        settings for the error reporter.
    :param module_name: ``__name__`` of the root module of the application.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "sentry": {
                "dsn": config.Optional(config.String, default=None),
                "site": config.Optional(config.String, default=None),
                "environment": config.Optional(config.String, default=None),
                "include_paths": config.Optional(config.String, default=None),
                "exclude_paths": config.Optional(config.String, default=None),
                "ignore_exceptions": config.Optional(config.TupleOf(config.String), default=[]),
                "sample_rate": config.Optional(config.Percent, default=1),
                "processors": config.Optional(
                    config.TupleOf(config.String),
                    default=["raven.processors.SanitizePasswordsProcessor"],
                ),
            }
        },
    )

    application_module = sys.modules[module_name]
    directory = os.path.dirname(application_module.__file__)
    release = None
    while directory != "/":
        try:
            release = raven.fetch_git_sha(directory)
        except raven.exceptions.InvalidGitRepository:
            directory = os.path.dirname(directory)
        else:
            break

    # pylint: disable=maybe-no-member
    return raven.Client(
        dsn=cfg.sentry.dsn,
        site=cfg.sentry.site,
        release=release,
        environment=cfg.sentry.environment,
        include_paths=cfg.sentry.include_paths,
        exclude_paths=cfg.sentry.exclude_paths,
        ignore_exceptions=cfg.sentry.ignore_exceptions,
        sample_rate=cfg.sentry.sample_rate,
        processors=cfg.sentry.processors,
    )