def test_tupleof_invalid(self):
    """TupleOf(Integer) must reject empty input and non-integer items."""
    parser = config.TupleOf(config.Integer)

    for bad_input in ("", "a, b"):
        with self.assertRaises(ValueError):
            parser(bad_input)
def cluster_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "cassandra.",
    execution_profiles: Optional[Dict[str, ExecutionProfile]] = None,
    **kwargs: Any,
):
    """Build a :py:class:`~cassandra.cluster.Cluster` from configuration.

    Configuration keys are read from ``app_config`` under ``prefix`` (e.g.
    ``cassandra.contact_points``) and mapped onto keyword arguments of the
    :py:class:`~cassandra.cluster.Cluster` constructor. Explicit keyword
    arguments passed to this function win over values from configuration.

    Supported keys:

    * ``contact_points`` (required): comma delimited list of contact points
      to try connecting for cluster discovery
    * ``port``: The server-side port to open connections to.
    * ``credentials_secret`` (optional): the key used to retrieve the
      database credentials from ``secrets`` as a
      :py:class:`~baseplate.secrets.CredentialSecret`.

    :param execution_profiles: Configured execution profiles to provide to
        the rest of the application.

    """
    # The spec is parsed relative to the prefix with its trailing dot removed.
    assert prefix.endswith(".")

    spec = config.SpecParser(
        {
            "contact_points": config.TupleOf(config.String),
            "port": config.Optional(config.Integer, default=None),
            "credentials_secret": config.Optional(config.String),
        }
    )
    options = spec.parse(prefix[:-1], app_config)

    # Explicit kwargs take precedence: only fill in values not already given.
    if options.port:
        kwargs.setdefault("port", options.port)

    if options.credentials_secret:
        if not secrets:
            raise TypeError(
                "'secrets' is required if 'credentials_secret' is set")
        creds = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault(
            "auth_provider",
            PlainTextAuthProvider(username=creds.username,
                                  password=creds.password),
        )

    return Cluster(options.contact_points,
                   execution_profiles=execution_profiles, **kwargs)
def make_plugin(app_config, http, irc, salons):
    """Create the deploy plugin: parse config, mount HTTP API, hook up IRC.

    Parses the plugin's configuration section, builds a
    :py:class:`DeployMonitor`, exposes its HTTP listeners under ``/deploy``,
    and registers the monitor's IRC command handlers.
    """
    deploy_config = config.parse_config(app_config, {
        "organizations": config.TupleOf(config.String),
        "default_hours_start": parse_time,
        "default_hours_end": parse_time,
        "default_tz": pytz.timezone,
        "blackout_hours_start": parse_time,
        "blackout_hours_end": parse_time,
    })

    monitor = DeployMonitor(deploy_config, irc, salons)

    # set up http api
    deploy_root = resource.Resource()
    http.root.putChild('deploy', deploy_root)

    # /deploy/status has a different constructor signature (needs the hmac
    # secret), so it is mounted separately from the table-driven listeners.
    deploy_root.putChild('status',
                         DeployStatusListener(http.hmac_secret, monitor))

    listeners = (
        ('begin', DeployBeganListener),
        ('end', DeployEndedListener),
        ('abort', DeployAbortedListener),
        ('error', DeployErrorListener),
        ('progress', DeployProgressListener),
        ('hold', DeployHoldListener),
        ('unhold', DeployUnHoldListener),
        ('hold_all', DeployHoldAllListener),
        ('unhold_all', DeployUnholdAllListener),
        ('send_announcement', DeploySendAnnouncementListener),
        ('get_salon_names', DeployGetSalonNamesListener),
    )
    for path, listener_cls in listeners:
        deploy_root.putChild(path, listener_cls(http, monitor))

    # register our irc commands
    commands = (
        monitor.salonify,
        monitor.desalonify,
        monitor.repository,
        monitor.help,
        monitor.status,
        monitor.status_all,
        monitor.hold,
        monitor.unhold,
        monitor.hold_all,
        monitor.unhold_all,
        monitor.acquire,
        monitor.release,
        monitor.jump,
        monitor.notready,
        monitor.enqueue,
        monitor.kick,
        monitor.refresh,
        monitor.refresh_all,
        monitor.forget,
        monitor.announce,
        monitor.set_deploy_hours,
        monitor.get_deploy_hours,
    )
    for command in commands:
        irc.register_command(command)
def cluster_from_config(app_config, prefix="cassandra.", **kwargs):
    """Build a :py:class:`~cassandra.cluster.Cluster` from configuration.

    Configuration keys are read from ``app_config`` under ``prefix`` (e.g.
    ``cassandra.contact_points``) and mapped onto keyword arguments of the
    :py:class:`~cassandra.cluster.Cluster` constructor. Explicit keyword
    arguments passed to this function win over values from configuration.

    Supported keys:

    * ``contact_points`` (required): comma delimited list of contact points
      to try connecting for cluster discovery
    * ``port``: The server-side port to open connections to.

    """
    # The config section name is the prefix with its trailing dot stripped.
    assert prefix.endswith(".")
    section = prefix[:-1]

    cfg = config.parse_config(
        app_config,
        {
            section: {
                "contact_points": config.TupleOf(config.String),
                "port": config.Optional(config.Integer, default=None),
            }
        },
    )
    options = getattr(cfg, section)

    # Explicit kwargs take precedence: only fill in values not already given.
    if options.port:
        kwargs.setdefault("port", options.port)

    return Cluster(options.contact_points, **kwargs)
def test_tupleof_valid(self):
    """TupleOf(Integer) parses comma-delimited ints, tolerating spaces."""
    parser = config.TupleOf(config.Integer)

    cases = (
        ("1,2,3", [1, 2, 3]),
        ("4, 5, 6", [4, 5, 6]),
    )
    for text, expected in cases:
        self.assertEqual(parser(text), expected)
def error_reporter_from_config(raw_config: config.RawConfig, module_name: str) -> "raven.Client":
    """Configure and return an error reporter.

    This expects one configuration option and can take many optional ones:

    ``sentry.dsn``
        The DSN provided by Sentry. If blank, the reporter will discard
        events.
    ``sentry.site`` (optional)
        An arbitrary string to identify this client installation.
    ``sentry.environment`` (optional)
        The environment your application is running in.
    ``sentry.exclude_paths`` (optional)
        Comma-delimited list of module prefixes to ignore when discovering
        where an error came from.
    ``sentry.include_paths`` (optional)
        Comma-delimited list of paths to include for consideration when
        drilling down to an exception.
    ``sentry.ignore_exceptions`` (optional)
        Comma-delimited list of fully qualified names of exception classes
        (potentially with * globs) to not report.
    ``sentry.sample_rate`` (optional)
        Percentage of errors to report. (e.g. "37%")
    ``sentry.processors`` (optional)
        Comma-delimited list of fully qualified names of processor classes
        to apply to events before sending to Sentry.

    Example usage::

        error_reporter_from_config(app_config, __name__)

    :param dict raw_config: The application configuration which should have
        settings for the error reporter.
    :param str module_name: ``__name__`` of the root module of the
        application.
    :rtype: :py:class:`raven.Client`

    """
    import raven  # pylint: disable=redefined-outer-name

    sentry_spec = {
        "dsn": config.Optional(config.String, default=None),
        "site": config.Optional(config.String, default=None),
        "environment": config.Optional(config.String, default=None),
        "include_paths": config.Optional(config.String, default=None),
        "exclude_paths": config.Optional(config.String, default=None),
        "ignore_exceptions": config.Optional(config.TupleOf(config.String),
                                             default=[]),
        "sample_rate": config.Optional(config.Percent, default=1),
        "processors": config.Optional(
            config.TupleOf(config.String),
            default=["raven.processors.SanitizePasswordsProcessor"],
        ),
    }
    cfg = config.parse_config(raw_config, {"sentry": sentry_spec})

    # Walk upward from the application's package directory until a git
    # repository is found (to pin the release SHA) or the root is reached.
    current_dir = os.path.dirname(sys.modules[module_name].__file__)
    release = None
    while current_dir != "/":
        try:
            release = raven.fetch_git_sha(current_dir)
        except raven.exceptions.InvalidGitRepository:
            current_dir = os.path.dirname(current_dir)
        else:
            break

    # pylint: disable=maybe-no-member
    return raven.Client(
        dsn=cfg.sentry.dsn,
        site=cfg.sentry.site,
        release=release,
        environment=cfg.sentry.environment,
        include_paths=cfg.sentry.include_paths,
        exclude_paths=cfg.sentry.exclude_paths,
        ignore_exceptions=cfg.sentry.ignore_exceptions,
        sample_rate=cfg.sentry.sample_rate,
        processors=cfg.sentry.processors,
    )
def main():
    """Entry point for the secret fetcher.

    Parses command line arguments and the INI configuration file, builds a
    Vault client factory, then either fetches secrets once (``--once``) or
    loops forever, refetching before the soonest secret/token expiration.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config_file", type=argparse.FileType("r"),
                            help="path to a configuration file")
    arg_parser.add_argument("--debug", default=False, action="store_true",
                            help="enable debug logging")
    arg_parser.add_argument(
        "--once",
        default=False,
        action="store_true",
        help="only run the fetcher once rather than as a daemon",
    )
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(message)s",
                        level=level)

    parser = configparser.RawConfigParser()
    # read_file() replaces readfp(), which was deprecated since Python 3.2
    # and removed in Python 3.12.
    parser.read_file(args.config_file)
    fetcher_config = dict(parser.items("secret-fetcher"))

    cfg = config.parse_config(
        fetcher_config,
        {
            "vault": {
                "url": config.String,
                "role": config.String,
                "auth_type": config.Optional(
                    config.OneOf(**VaultClientFactory.auth_types()),
                    default=VaultClientFactory.auth_types()["aws"],
                ),
                "mount_point": config.Optional(config.String,
                                               default="aws-ec2"),
            },
            "output": {
                "path": config.Optional(config.String,
                                        default="/var/local/secrets.json"),
                "owner": config.Optional(config.UnixUser, default=0),
                "group": config.Optional(config.UnixGroup, default=0),
                "mode": config.Optional(config.Integer(base=8),
                                        default=0o400),
            },
            "secrets": config.Optional(config.TupleOf(config.String),
                                       default=[]),
        },
    )

    # pylint: disable=maybe-no-member
    client_factory = VaultClientFactory(cfg.vault.url, cfg.vault.role,
                                        cfg.vault.auth_type,
                                        cfg.vault.mount_point)

    if args.once:
        logger.info("Running secret fetcher once")
        fetch_secrets(cfg, client_factory)
    else:
        logger.info("Running secret fetcher as a daemon")
        while True:
            soonest_expiration = fetch_secrets(cfg, client_factory)
            # Sleep until shortly before the soonest expiration so the next
            # fetch happens with VAULT_TOKEN_PREFETCH_TIME of headroom, but
            # always wait at least one second to avoid a hot loop.
            # NOTE(review): utcnow() is naive UTC; assumes soonest_expiration
            # is also naive UTC — confirm against fetch_secrets().
            time_til_expiration = (soonest_expiration -
                                   datetime.datetime.utcnow())
            time_to_sleep = time_til_expiration - VAULT_TOKEN_PREFETCH_TIME
            time.sleep(max(int(time_to_sleep.total_seconds()), 1))
def zookeeper_client_from_config(secrets, app_config, read_only=None):
    """Configure and return a ZooKeeper client.

    There are several configuration options:

    ``zookeeper.hosts``
        A comma-delimited list of hosts with optional ``chroot`` at the end.
        For example ``zk01:2181,zk02:2181`` or
        ``zk01:2181,zk02:2181/some/root``.
    ``zookeeper.credentials``
        (Optional) A comma-delimited list of paths to secrets in the secrets
        store that contain ZooKeeper authentication credentials. Secrets
        should be of the "simple" type and contain ``username:password``.
    ``zookeeper.timeout``
        (Optional) A time span of how long to wait for each connection
        attempt.

    The client will attempt forever to reconnect on connection loss.

    :param baseplate.secrets.SecretsStore secrets: A secrets store object
    :param dict raw_config: The application configuration which should have
        settings for the ZooKeeper client.
    :param bool read_only: Whether or not to allow connections to read-only
        ZooKeeper servers.

    :rtype: :py:class:`kazoo.client.KazooClient`

    """
    parsed = config.parse_config(
        app_config,
        {
            "zookeeper": {
                "hosts": config.String,
                "credentials": config.Optional(config.TupleOf(config.String),
                                               default=[]),
                "timeout": config.Optional(
                    config.Timespan,
                    default=config.Timespan("5 seconds")),
            }
        },
    )
    # pylint: disable=maybe-no-member
    zk_cfg = parsed.zookeeper

    # Each configured secret path yields one "digest" auth entry.
    auth_data = [("digest", secrets.get_simple(path).decode("utf8"))
                 for path in zk_cfg.credentials]

    return KazooClient(
        zk_cfg.hosts,
        timeout=zk_cfg.timeout.total_seconds(),
        auth_data=auth_data,
        read_only=read_only,
        # this retry policy tells Kazoo how often it should attempt
        # connections to ZooKeeper from its worker thread/greenlet. when the
        # connection is lost during normal operation (i.e. after it was first
        # established) Kazoo will do retries quietly in the background while
        # the application continues forward. because of this, we want it to
        # retry forever so that it doesn't just give up at some point. the
        # application can still decide if it wants to exit after being
        # disconnected for an amount of time by polling the
        # KazooClient.connected property.
        #
        # note: KazooClient.start() has a timeout parameter which defaults to
        # 15 seconds and controls the maximum amount of time start() will
        # block waiting for the background thread to confirm it has
        # established a connection. so even though we do infinite retries
        # here, users of this function can configure the amount of time they
        # are willing to wait for initial connection.
        connection_retry=dict(
            max_tries=-1,   # keep reconnecting forever
            delay=0.1,      # initial delay
            backoff=2,      # exponential backoff
            max_jitter=1,   # maximum amount to jitter sleeptimes
            max_delay=60,   # never wait longer than this
        ),
    )