def init_sentry_client_from_config(raw_config: config.RawConfig, **kwargs: Any) -> None:
    """Set up the global Sentry client from application configuration.

    One configuration option is expected and several optional ones are
    supported:

    ``sentry.dsn``
        The DSN provided by Sentry. If blank, the reporter will discard events.
    ``sentry.environment`` (optional)
        The environment your application is running in.
    ``sentry.sample_rate`` (optional)
        Percentage of errors to report. (e.g. "37%")
    ``sentry.ignore_errors`` (optional)
        A comma-delimited list of exception names, unqualified (e.g.
        ServerTimeout) or fully qualified (e.g.
        baseplate.observers.timeout.ServerTimeout) to not notify sentry about.
        Note: a minimal list of common exceptions is hard-coded in Baseplate,
        this option only extends that list.

    Example usage::

        init_sentry_client_from_config(app_config)

    :param raw_config: The application configuration which should have
        settings for the error reporter.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "sentry": {
                "dsn": config.Optional(config.String, default=None),
                "environment": config.Optional(config.String, default=None),
                "sample_rate": config.Optional(config.Percent, default=1),
                "ignore_errors": config.Optional(config.TupleOf(config.String), default=()),
            }
        },
    )
    sentry_cfg = cfg.sentry

    # Explicit keyword arguments always take precedence over values read
    # from the configuration file.
    if sentry_cfg.dsn:
        kwargs.setdefault("dsn", sentry_cfg.dsn)
    if sentry_cfg.environment:
        kwargs.setdefault("environment", sentry_cfg.environment)
    kwargs.setdefault("sample_rate", sentry_cfg.sample_rate)

    # The built-in ignore list is always applied; configuration can only
    # extend it, never shrink it.
    combined_ignores: List[Union[type, str]] = [
        *ALWAYS_IGNORE_ERRORS,
        *sentry_cfg.ignore_errors,
    ]
    kwargs.setdefault("ignore_errors", combined_ignores)

    # Local variables may contain sensitive data; don't ship them to Sentry.
    kwargs.setdefault("with_locals", False)

    client = sentry_sdk.Client(**kwargs)
    sentry_sdk.Hub.current.bind_client(client)
def test_tupleof_invalid(self):
    # Parsing must reject empty input and items that aren't integers.
    integer_tuple = config.TupleOf(config.Integer)
    self.assertRaises(ValueError, integer_tuple, "")
    self.assertRaises(ValueError, integer_tuple, "a, b")
def cluster_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "cassandra.",
    execution_profiles: Optional[Dict[str, ExecutionProfile]] = None,
    **kwargs: Any,
) -> Cluster:
    """Build a :py:class:`~cassandra.cluster.Cluster` from a config dictionary.

    Keys read by this function are expected to carry ``prefix``, e.g.
    ``cassandra.contact_points``. Each recognized key maps onto the
    corresponding keyword argument of the
    :py:class:`~cassandra.cluster.Cluster` constructor; keyword arguments
    passed directly to this function win over file-based configuration.

    Supported keys:

    * ``contact_points`` (required): comma delimited list of contact points to
      try connecting for cluster discovery
    * ``port``: The server-side port to open connections to.
    * ``credentials_secret`` (optional): the key used to retrieve the database
      credentials from ``secrets`` as a
      :py:class:`~baseplate.lib.secrets.CredentialSecret`.

    :param execution_profiles: Configured execution profiles to provide to the
        rest of the application.

    """
    assert prefix.endswith(".")

    spec = config.SpecParser(
        {
            "contact_points": config.TupleOf(config.String),
            "port": config.Optional(config.Integer, default=None),
            "credentials_secret": config.Optional(config.String),
        }
    )
    options = spec.parse(prefix[:-1], app_config)

    if options.port:
        kwargs.setdefault("port", options.port)

    if options.credentials_secret:
        # Credentials come from the secrets store, so one must be provided.
        if not secrets:
            raise TypeError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault(
            "auth_provider",
            PlainTextAuthProvider(
                username=credentials.username,
                password=credentials.password,
            ),
        )

    return Cluster(options.contact_points, execution_profiles=execution_profiles, **kwargs)
def from_config_and_client(
    cls, raw_config: config.RawConfig, client: metrics.Client
) -> "TaggedMetricsBaseplateObserver":
    """Build an observer from application configuration and a metrics client.

    Reads two optional settings:

    ``metrics.allowlist``
        Comma-delimited list of tag names; parsed entries are combined with
        the always-present ``client`` and ``endpoint`` entries.
    ``metrics_observer.sample_rate``
        Percentage of metrics to sample (default: 100%).

    :param raw_config: The application configuration.
    :param client: The metrics client to report through.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "metrics": {
                # Use an immutable tuple as the default (consistent with the
                # rest of baseplate's config parsing) so a single shared
                # default value can never be mutated between calls. The
                # consumer below wraps it in set(), so this is a drop-in
                # replacement for the previous mutable [] default.
                "allowlist": config.Optional(config.TupleOf(config.String), default=()),
            },
            "metrics_observer": {"sample_rate": config.Optional(config.Percent, default=1.0)},
        },
    )
    return cls(
        client,
        allowlist=set(cfg.metrics.allowlist) | {"client", "endpoint"},
        sample_rate=cfg.metrics_observer.sample_rate,
    )
def http_adapter_from_config(
    app_config: config.RawConfig, prefix: str, **kwargs: Any
) -> HTTPAdapter:
    """Build an HTTPAdapter from a configuration dictionary.

    Keys read by this function carry ``prefix``, e.g. ``http.pool_connections``,
    ``http.max_retries``, etc. Each key maps to the keyword argument of the
    same name on the :py:class:`~requests.adapters.HTTPAdapter` constructor.

    Supported keys:

    * ``pool_connections``: The number of connections to cache (default: 10).
    * ``pool_maxsize``: The maximum number of connections to keep in the pool
      (default: 10).
    * ``max_retries``: How many times to retry DNS lookups or connection
      attempts, but never sending data (default: 0).
    * ``pool_block``: Whether the connection pool will block when trying to
      get a connection (default: false).

    Additionally, the rules for Advocate's address filtering can be configured
    with the ``filter`` sub-keys:

    * ``filter.ip_allowlist``: A comma-delimited list of IP addresses (1.2.3.4)
      or CIDR-notation (1.2.3.0/24) ranges that the client can always connect
      to (default: anything not on the local network).
    * ``filter.ip_denylist``: A comma-delimited list of IP addresses or
      CIDR-notation ranges the client may never connect to (default: the local
      network).
    * ``filter.port_allowlist``: A comma-delimited list of TCP port numbers
      that the client can connect to (default: 80, 8080, 443, 8443, 8000).
    * ``filter.port_denylist``: A comma-delimited list of TCP port numbers
      that the client may never connect to (default: none).
    * ``filter.hostname_denylist``: A comma-delimited list of hostnames that
      the client may never connect to (default: none).
    * ``filter.allow_ipv6``: Should the client be allowed to connect to IPv6
      hosts? (default: false, note: IPv6 is tricky to apply filtering rules
      comprehensively to).

    """
    assert prefix.endswith(".")

    spec = config.SpecParser(
        {
            "pool_connections": config.Optional(config.Integer, default=10),
            "pool_maxsize": config.Optional(config.Integer, default=10),
            "max_retries": config.Optional(config.Integer, default=0),
            "pool_block": config.Optional(config.Boolean, default=False),
            "filter": {
                "ip_allowlist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "ip_denylist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "port_allowlist": config.Optional(config.TupleOf(int)),
                "port_denylist": config.Optional(config.TupleOf(int)),
                "hostname_denylist": config.Optional(config.TupleOf(config.String)),
                "allow_ipv6": config.Optional(config.Boolean, default=False),
            },
        }
    )
    options = spec.parse(prefix[:-1], app_config)

    # Each pool option maps 1:1 onto an HTTPAdapter constructor kwarg; any
    # value passed directly to this function takes precedence.
    for option_name in ("pool_connections", "pool_maxsize", "max_retries", "pool_block"):
        value = getattr(options, option_name)
        if value is not None:
            kwargs.setdefault(option_name, value)

    # Advocate still uses the older whitelist/blacklist parameter names.
    kwargs.setdefault(
        "validator",
        AddrValidator(
            ip_whitelist=options.filter.ip_allowlist,
            ip_blacklist=options.filter.ip_denylist,
            port_whitelist=options.filter.port_allowlist,
            port_blacklist=options.filter.port_denylist,
            hostname_blacklist=options.filter.hostname_denylist,
            allow_ipv6=options.filter.allow_ipv6,
        ),
    )
    return ValidatingHTTPAdapter(**kwargs)
def error_reporter_from_config(raw_config: config.RawConfig, module_name: str) -> raven.Client:
    """Configure and return a error reporter.

    This expects one configuration option and can take many optional ones:

    ``sentry.dsn``
        The DSN provided by Sentry. If blank, the reporter will discard events.
    ``sentry.site`` (optional)
        An arbitrary string to identify this client installation.
    ``sentry.environment`` (optional)
        The environment your application is running in.
    ``sentry.exclude_paths`` (optional)
        Comma-delimited list of module prefixes to ignore when discovering
        where an error came from.
    ``sentry.include_paths`` (optional)
        Comma-delimited list of paths to include for consideration when
        drilling down to an exception.
    ``sentry.ignore_exceptions`` (optional)
        Comma-delimited list of fully qualified names of exception classes
        (potentially with * globs) to not report.
    ``sentry.sample_rate`` (optional)
        Percentage of errors to report. (e.g. "37%")
    ``sentry.processors`` (optional)
        Comma-delimited list of fully qualified names of processor classes
        to apply to events before sending to Sentry.

    Example usage::

        error_reporter_from_config(app_config, __name__)

    :param raw_config: The application configuration which should have
        settings for the error reporter.
    :param module_name: ``__name__`` of the root module of the application.

    """
    cfg = config.parse_config(
        raw_config,
        {
            "sentry": {
                "dsn": config.Optional(config.String, default=None),
                "site": config.Optional(config.String, default=None),
                "environment": config.Optional(config.String, default=None),
                "include_paths": config.Optional(config.String, default=None),
                "exclude_paths": config.Optional(config.String, default=None),
                "ignore_exceptions": config.Optional(config.TupleOf(config.String), default=[]),
                "sample_rate": config.Optional(config.Percent, default=1),
                "processors": config.Optional(
                    config.TupleOf(config.String),
                    default=["raven.processors.SanitizePasswordsProcessor"],
                ),
            }
        },
    )

    application_module = sys.modules[module_name]
    directory = os.path.dirname(application_module.__file__)
    release = None
    # Walk up the directory tree looking for a git repository to derive a
    # release SHA from. Stop at the filesystem root ("/") and also at "":
    # os.path.dirname() yields "" once a relative path is exhausted, and the
    # previous `directory != "/"` condition would then loop forever.
    while directory not in ("/", ""):
        try:
            release = raven.fetch_git_sha(directory)
        except raven.exceptions.InvalidGitRepository:
            directory = os.path.dirname(directory)
        else:
            break

    # pylint: disable=maybe-no-member
    return raven.Client(
        dsn=cfg.sentry.dsn,
        site=cfg.sentry.site,
        release=release,
        environment=cfg.sentry.environment,
        include_paths=cfg.sentry.include_paths,
        exclude_paths=cfg.sentry.exclude_paths,
        ignore_exceptions=cfg.sentry.ignore_exceptions,
        sample_rate=cfg.sentry.sample_rate,
        processors=cfg.sentry.processors,
    )
def main() -> None:
    """Entry point: parse arguments and run the secret fetcher.

    Runs either once (``--once``) or forever as a daemon, refetching secrets
    shortly before the soonest expiration.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "config_file", type=argparse.FileType("r"), help="path to a configuration file"
    )
    arg_parser.add_argument(
        "--debug", default=False, action="store_true", help="enable debug logging"
    )
    arg_parser.add_argument(
        "--once",
        default=False,
        action="store_true",
        help="only run the fetcher once rather than as a daemon",
    )
    args = arg_parser.parse_args()

    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(message)s", level=level)

    parser = configparser.RawConfigParser(interpolation=EnvironmentInterpolation())
    parser.read_file(args.config_file)
    fetcher_config = dict(parser.items("secret-fetcher"))

    cfg = config.parse_config(
        fetcher_config,
        {
            "vault": {
                "url": config.DefaultFromEnv(config.String, "BASEPLATE_DEFAULT_VAULT_URL"),
                "role": config.String,
                "auth_type": config.Optional(
                    config.OneOf(**VaultClientFactory.auth_types()),
                    default=VaultClientFactory.auth_types()["aws"],
                ),
                "mount_point": config.DefaultFromEnv(
                    config.String, "BASEPLATE_VAULT_MOUNT_POINT", fallback="aws-ec2"
                ),
            },
            "output": {
                "path": config.Optional(config.String, default="/var/local/secrets.json"),
                "owner": config.Optional(config.UnixUser, default=0),
                "group": config.Optional(config.UnixGroup, default=0),
                "mode": config.Optional(config.Integer(base=8), default=0o400),  # type: ignore
            },
            "secrets": config.Optional(config.TupleOf(config.String), default=[]),
            "callback": config.Optional(config.String),
        },
    )

    # pylint: disable=maybe-no-member
    client_factory = VaultClientFactory(
        cfg.vault.url, cfg.vault.role, cfg.vault.auth_type, cfg.vault.mount_point
    )

    if args.once:
        logger.info("Running secret fetcher once")
        fetch_secrets(cfg, client_factory)
        trigger_callback(cfg.callback, cfg.output.path)
        return

    logger.info("Running secret fetcher as a daemon")
    last_proc = None
    while True:
        soonest_expiration = fetch_secrets(cfg, client_factory)
        last_proc = trigger_callback(cfg.callback, cfg.output.path, last_proc)
        # Wake up a little before the soonest secret expires, but never
        # spin faster than once per second.
        time_til_expiration = soonest_expiration - datetime.datetime.utcnow()
        time_to_sleep = time_til_expiration - VAULT_TOKEN_PREFETCH_TIME
        time.sleep(max(int(time_to_sleep.total_seconds()), 1))
def test_tupleof_valid(self):
    integer_tuple = config.TupleOf(config.Integer)
    # Both tightly-packed and space-padded delimiters parse to int lists.
    self.assertEqual(integer_tuple("1,2,3"), [1, 2, 3])
    self.assertEqual(integer_tuple("4, 5, 6"), [4, 5, 6])
def zookeeper_client_from_config(
    secrets: SecretsStore, app_config: config.RawConfig, read_only: Optional[bool] = None
) -> KazooClient:
    """Configure and return a ZooKeeper client.

    There are several configuration options:

    ``zookeeper.hosts``
        A comma-delimited list of hosts with optional ``chroot`` at the end.
        For example ``zk01:2181,zk02:2181`` or
        ``zk01:2181,zk02:2181/some/root``.
    ``zookeeper.credentials`` (Optional)
        A comma-delimited list of paths to secrets in the secrets store that
        contain ZooKeeper authentication credentials. Secrets should be of the
        "simple" type and contain ``username:password``.
    ``zookeeper.timeout`` (Optional)
        A time span of how long to wait for each connection attempt.

    The client will attempt forever to reconnect on connection loss.

    :param secrets: A secrets store object
    :param raw_config: The application configuration which should have
        settings for the ZooKeeper client.
    :param read_only: Whether or not to allow connections to read-only
        ZooKeeper servers.

    """
    full_cfg = config.parse_config(
        app_config,
        {
            "zookeeper": {
                "hosts": config.String,
                "credentials": config.Optional(config.TupleOf(config.String), default=[]),
                "timeout": config.Optional(config.Timespan, default=config.Timespan("5 seconds")),
            }
        },
    )

    # pylint: disable=maybe-no-member
    cfg = full_cfg.zookeeper

    # Each configured secret becomes a "digest"-scheme auth entry.
    auth_data = [
        ("digest", secrets.get_simple(path).decode("utf8")) for path in cfg.credentials
    ]

    return KazooClient(
        cfg.hosts,
        timeout=cfg.timeout.total_seconds(),
        auth_data=auth_data,
        read_only=read_only,
        # this retry policy tells Kazoo how often it should attempt
        # connections to ZooKeeper from its worker thread/greenlet. when the
        # connection is lost during normal operation (i.e. after it was first
        # established) Kazoo will do retries quietly in the background while
        # the application continues forward. because of this, we want it to
        # retry forever so that it doesn't just give up at some point. the
        # application can still decide if it wants to exit after being
        # disconnected for an amount of time by polling the
        # KazooClient.connected property.
        #
        # note: KazooClient.start() has a timeout parameter which defaults to
        # 15 seconds and controls the maximum amount of time start() will
        # block waiting for the background thread to confirm it has
        # established a connection. so even though we do infinite retries
        # here, users of this function can configure the amount of time they
        # are willing to wait for initial connection.
        connection_retry=dict(
            max_tries=-1,  # keep reconnecting forever
            delay=0.1,  # initial delay
            backoff=2,  # exponential backoff
            max_jitter=1,  # maximum amount to jitter sleeptimes
            max_delay=60,  # never wait longer than this
        ),
    )