Example #1
def thrift_pool_from_config(app_config: config.RawConfig, prefix: str,
                            **kwargs: Any) -> "ThriftConnectionPool":
    """Make a ThriftConnectionPool from a configuration dictionary.

    The keys useful to :py:func:`thrift_pool_from_config` should be prefixed,
    e.g.  ``example_service.endpoint`` etc. The ``prefix`` argument specifies
    the prefix used to filter keys.  Each key is mapped to a corresponding
    keyword argument on the :py:class:`ThriftConnectionPool` constructor.  Any
    keyword arguments given to this function will also be passed through to
    the constructor. Keyword arguments take precedence over the configuration
    file.

    Supported keys:

    * ``endpoint`` (required): A ``host:port`` pair, e.g. ``localhost:2014``,
        where the Thrift server can be found.
    * ``size``: The size of the connection pool.
    * ``max_age``: The oldest a connection can be before it's recycled and
        replaced with a new one. Written as a
        :py:func:`~baseplate.lib.config.Timespan` e.g. ``1 minute``.
    * ``timeout``: The maximum amount of time a connection attempt or RPC call
        can take before a TimeoutError is raised.
        (:py:func:`~baseplate.lib.config.Timespan`)
    * ``max_connection_attempts``: The maximum number of times the pool will attempt to
        open a connection.

    .. versionchanged:: 1.2
        ``max_retries`` was renamed ``max_connection_attempts``.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "endpoint": config.Endpoint,
            "size": config.Optional(config.Integer, default=10),
            "max_age": config.Optional(config.Timespan, default=config.Timespan("1 minute")),
            "timeout": config.Optional(config.Timespan, default=config.Timespan("1 second")),
            "max_connection_attempts": config.Optional(config.Integer),
            "max_retries": config.Optional(config.Integer),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.size is not None:
        kwargs.setdefault("size", options.size)
    if options.max_age is not None:
        kwargs.setdefault("max_age", options.max_age.total_seconds())
    if options.timeout is not None:
        kwargs.setdefault("timeout", options.timeout.total_seconds())
    if options.max_connection_attempts is not None:
        kwargs.setdefault("max_connection_attempts",
                          options.max_connection_attempts)
    if options.max_retries is not None:
        raise Exception("max_retries was renamed to max_connection_attempts")

    return ThriftConnectionPool(endpoint=options.endpoint, **kwargs)
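A minimal usage sketch (the prefix and values below are hypothetical, following the ``host:port`` and :py:func:`~baseplate.lib.config.Timespan` formats described above):

app_config = {
    "example_service.endpoint": "localhost:9090",
    "example_service.size": "25",
    "example_service.timeout": "5 seconds",
    "example_service.max_connection_attempts": "3",
}
# a keyword argument overrides the corresponding key from app_config
pool = thrift_pool_from_config(app_config, prefix="example_service.", size=50)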
Example #2
def hvac_factory_from_config(app_config: config.RawConfig,
                             secrets_store: SecretsStore,
                             prefix: str = "vault.") -> "HvacContextFactory":
    """Make an HVAC client factory from a configuration dictionary.

    The keys useful to :py:func:`hvac_factory_from_config` should be prefixed,
    e.g.  ``vault.timeout``. The ``prefix`` argument specifies the prefix used
    to filter keys.

    Supported keys:

    * ``timeout``: How long to wait for calls to Vault.
        (:py:func:`~baseplate.lib.config.Timespan`)

    :param app_config: The raw application configuration.
    :param secrets_store: A configured secrets store from which we can get a
        Vault authentication token.
    :param prefix: The prefix for configuration keys.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {"timeout": config.Optional(config.Timespan, default=datetime.timedelta(seconds=1))}
    )
    options = parser.parse(prefix[:-1], app_config)

    return HvacContextFactory(secrets_store, options.timeout)
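A minimal usage sketch, assuming ``secrets`` is an already-configured :py:class:`SecretsStore` (the timeout value is hypothetical):

app_config = {"vault.timeout": "3 seconds"}
hvac_factory = hvac_factory_from_config(app_config, secrets_store=secrets)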
Example #3
def connection_from_config(app_config: config.RawConfig, prefix: str,
                           **kwargs: Any) -> Connection:
    """Make a Connection from a configuration dictionary.

    The keys useful to :py:func:`connection_from_config` should be prefixed,
    e.g. ``amqp.hostname`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys.  Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.connection.Connection` constructor.  Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~kombu.connection.Connection` constructor. Keyword arguments
    take precedence over the configuration file.

    Supported keys:

    * ``hostname``
    * ``virtual_host``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser({
        "hostname": config.String,
        "virtual_host": config.Optional(config.String)
    })
    options = parser.parse(prefix[:-1], app_config)
    return Connection(hostname=options.hostname,
                      virtual_host=options.virtual_host,
                      **kwargs)
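A minimal usage sketch with hypothetical values; extra keyword arguments such as ``heartbeat`` are simply forwarded to the :py:class:`~kombu.connection.Connection` constructor:

app_config = {
    "amqp.hostname": "amqp://rabbit.local:5672",
    "amqp.virtual_host": "/",
}
connection = connection_from_config(app_config, prefix="amqp.", heartbeat=60)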
Example #4
def exchange_from_config(app_config: config.RawConfig, prefix: str,
                         **kwargs: Any) -> Exchange:
    """Make an Exchange from a configuration dictionary.

    The keys useful to :py:func:`exchange_from_config` should be prefixed,
    e.g. ``amqp.exchange_name`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys.  Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.Exchange` constructor.  Any keyword
    arguments given to this function will be passed through to the
    :py:class:`~kombu.Exchange` constructor. Keyword arguments take precedence
    over the configuration file.

    Supported keys:

    * ``exchange_name``
    * ``exchange_type``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser({
        "exchange_name": config.Optional(config.String),
        "exchange_type": config.String
    })
    options = parser.parse(prefix[:-1], app_config)
    return Exchange(name=options.exchange_name or "",
                    type=options.exchange_type,
                    **kwargs)
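A minimal usage sketch with hypothetical values; note that when ``exchange_name`` is unset, the code above falls back to the nameless default exchange:

app_config = {
    "amqp.exchange_name": "events",
    "amqp.exchange_type": "topic",
}
exchange = exchange_from_config(app_config, prefix="amqp.")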
Example #5
def engine_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "database.",
    **kwargs: Any,
) -> Engine:
    """Make an :py:class:`~sqlalchemy.engine.Engine` from a configuration dictionary.

    The keys useful to :py:func:`engine_from_config` should be prefixed, e.g.
    ``database.url``, etc. The ``prefix`` argument specifies the prefix used to
    filter keys.

    Supported keys:

    * ``url``: the connection URL to the database, passed to
        :py:func:`~sqlalchemy.engine.url.make_url` to create the
        :py:class:`~sqlalchemy.engine.url.URL` used to connect to the database.
    * ``credentials_secret`` (optional): the key used to retrieve the database
        credentials from ``secrets`` as a :py:class:`~baseplate.lib.secrets.CredentialSecret`.
        If this is supplied, any credentials given in ``url`` will be replaced by
        these.
    * ``pool_recycle`` (optional): this setting causes the pool to recycle connections after
        the given number of seconds has passed. It defaults to -1, or no timeout.
    * ``pool_pre_ping`` (optional): when set to true, this setting causes
        sqlalchemy to perform a liveness-check query each time a connection is
        checked out of the pool.  If the liveness-check fails, the connection
        is gracefully recycled.  This ensures severed connections are handled
        more gracefully, at the cost of doing a `SELECT 1` at the start of each
        checkout. When used, this obviates most of the reasons you might use
        pool_recycle, and as such they shouldn't normally be used
        simultaneously.  Requires SQLAlchemy 1.3.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "credentials_secret": config.Optional(config.String),
            "pool_recycle": config.Optional(config.Integer),
            "pool_pre_ping": config.Optional(config.Boolean),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    url = make_url(options.url)

    if options.pool_recycle is not None:
        kwargs.setdefault("pool_recycle", options.pool_recycle)

    if options.pool_pre_ping is not None:
        kwargs.setdefault("pool_pre_ping", options.pool_pre_ping)

    if options.credentials_secret:
        if not secrets:
            raise TypeError("'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        url.username = credentials.username
        url.password = credentials.password

    return create_engine(url, **kwargs)
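A minimal usage sketch with hypothetical values, using the default ``database.`` prefix:

app_config = {
    "database.url": "postgresql://db.local:5432/mydb",
    "database.pool_recycle": "300",
    "database.pool_pre_ping": "true",
}
engine = engine_from_config(app_config)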
Example #6
def cluster_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "cassandra.",
    execution_profiles: Optional[Dict[str, ExecutionProfile]] = None,
    **kwargs: Any,
) -> Cluster:
    """Make a Cluster from a configuration dictionary.

    The keys useful to :py:func:`cluster_from_config` should be prefixed, e.g.
    ``cassandra.contact_points`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys.  Each key is mapped to a corresponding keyword
    argument on the :py:class:`~cassandra.cluster.Cluster` constructor.  Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~cassandra.cluster.Cluster` constructor. Keyword arguments take
    precedence over the configuration file.

    Supported keys:

    * ``contact_points`` (required): comma delimited list of contact points to
      try connecting for cluster discovery
    * ``port``: The server-side port to open connections to.
    * ``credentials_secret`` (optional): the key used to retrieve the database
        credentials from ``secrets`` as a :py:class:`~baseplate.lib.secrets.CredentialSecret`.

    :param execution_profiles: Configured execution profiles to provide to the
        rest of the application.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "contact_points": config.TupleOf(config.String),
            "port": config.Optional(config.Integer, default=None),
            "credentials_secret": config.Optional(config.String),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.port:
        kwargs.setdefault("port", options.port)

    if options.credentials_secret:
        if not secrets:
            raise TypeError(
                "'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault(
            "auth_provider",
            PlainTextAuthProvider(username=credentials.username,
                                  password=credentials.password),
        )

    return Cluster(options.contact_points,
                   execution_profiles=execution_profiles,
                   **kwargs)
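A minimal usage sketch; the contact points, secret path, and ``secrets`` store below are hypothetical:

app_config = {
    "cassandra.contact_points": "10.0.0.1,10.0.0.2",
    "cassandra.port": "9042",
    "cassandra.credentials_secret": "secret/myservice/cassandra",
}
cluster = cluster_from_config(app_config, secrets=secrets)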
Example #7
def engine_from_config(
    app_config: config.RawConfig,
    secrets: Optional[SecretsStore] = None,
    prefix: str = "database.",
    **kwargs: Any,
) -> Engine:
    """Make an :py:class:`~sqlalchemy.engine.Engine` from a configuration dictionary.

    The keys useful to :py:func:`engine_from_config` should be prefixed, e.g.
    ``database.url``, etc. The ``prefix`` argument specifies the prefix used to
    filter keys.

    Supported keys:

    * ``url``: the connection URL to the database, passed to
        :py:func:`~sqlalchemy.engine.url.make_url` to create the
        :py:class:`~sqlalchemy.engine.url.URL` used to connect to the database.
    * ``credentials_secret`` (optional): the key used to retrieve the database
        credentials from ``secrets`` as a :py:class:`~baseplate.lib.secrets.CredentialSecret`.
        If this is supplied, any credentials given in ``url`` will be replaced by
        these.
    * ``pool_recycle`` (optional): this setting causes the pool to recycle connections after
        the given number of seconds has passed. It defaults to -1, or no timeout.

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "credentials_secret": config.Optional(config.String),
            "pool_recycle": config.Optional(config.Integer),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    url = make_url(options.url)

    if options.pool_recycle is not None:
        kwargs.setdefault("pool_recycle", options.pool_recycle)

    if options.credentials_secret:
        if not secrets:
            raise TypeError(
                "'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        url.username = credentials.username
        url.password = credentials.password

    return create_engine(url, **kwargs)
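This variant is used the same way as Example #5; the sketch below exercises the ``credentials_secret`` path, with a hypothetical secret name and an assumed ``secrets`` store:

app_config = {
    "database.url": "postgresql://db.local:5432/mydb",
    "database.credentials_secret": "secret/myservice/db",
}
engine = engine_from_config(app_config, secrets=secrets)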
Example #8
def pool_from_config(app_config: config.RawConfig,
                     prefix: str = "redis.",
                     **kwargs: Any) -> redis.ConnectionPool:
    """Make a ConnectionPool from a configuration dictionary.

    The keys useful to :py:func:`pool_from_config` should be prefixed, e.g.
    ``redis.url``, ``redis.max_connections``, etc. The ``prefix`` argument
    specifies the prefix used to filter keys.  Each key is mapped to a
    corresponding keyword argument on the :py:class:`redis.ConnectionPool`
    constructor.

    Supported keys:

    * ``url`` (required): a URL like ``redis://localhost/0``.
    * ``max_connections``: an integer maximum number of connections in the pool
    * ``socket_connect_timeout``: how long to wait for sockets to connect. e.g.
        ``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)
    * ``socket_timeout``: how long to wait for socket operations, e.g.
        ``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "url": config.String,
            "max_connections": config.Optional(config.Integer, default=None),
            "socket_connect_timeout": config.Optional(config.Timespan, default=None),
            "socket_timeout": config.Optional(config.Timespan, default=None),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.max_connections is not None:
        kwargs.setdefault("max_connections", options.max_connections)
    if options.socket_connect_timeout is not None:
        kwargs.setdefault("socket_connect_timeout",
                          options.socket_connect_timeout.total_seconds())
    if options.socket_timeout is not None:
        kwargs.setdefault("socket_timeout",
                          options.socket_timeout.total_seconds())

    return redis.BlockingConnectionPool.from_url(options.url, **kwargs)
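A minimal usage sketch with hypothetical values, using the default ``redis.`` prefix:

app_config = {
    "redis.url": "redis://localhost:6379/0",
    "redis.max_connections": "25",
    "redis.socket_timeout": "200 milliseconds",
}
pool = pool_from_config(app_config)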
Example #9
def connection_from_config(app_config: config.RawConfig,
                           prefix: str,
                           secrets: Optional[SecretsStore] = None,
                           **kwargs: Any) -> Connection:
    """Make a Connection from a configuration dictionary.

    The keys useful to :py:func:`connection_from_config` should be prefixed,
    e.g. ``amqp.hostname`` etc. The ``prefix`` argument specifies the
    prefix used to filter keys.  Each key is mapped to a corresponding keyword
    argument on the :py:class:`~kombu.connection.Connection` constructor.  Any
    keyword arguments given to this function will be passed through to the
    :py:class:`~kombu.connection.Connection` constructor. Keyword arguments
    take precedence over the configuration file.

    Supported keys:

    * ``credentials_secret``
    * ``hostname``
    * ``virtual_host``

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "credentials_secret": config.Optional(config.String),
            "hostname": config.String,
            "virtual_host": config.Optional(config.String),
        }
    )
    options = parser.parse(prefix[:-1], app_config)
    if options.credentials_secret:
        if not secrets:
            raise ValueError(
                "'secrets' is required if 'credentials_secret' is set")
        credentials = secrets.get_credentials(options.credentials_secret)
        kwargs.setdefault("userid", credentials.username)
        kwargs.setdefault("password", credentials.password)
    return Connection(hostname=options.hostname,
                      virtual_host=options.virtual_host,
                      **kwargs)
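This variant extends Example #3 with ``credentials_secret``; the secret name and ``secrets`` store below are hypothetical:

app_config = {
    "amqp.hostname": "rabbit.local",
    "amqp.credentials_secret": "secret/myservice/rabbitmq",
}
connection = connection_from_config(app_config, prefix="amqp.", secrets=secrets)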
Example #10
def http_adapter_from_config(
    app_config: config.RawConfig, prefix: str, **kwargs: Any
) -> HTTPAdapter:
    """Make an HTTPAdapter from a configuration dictionary.

    The keys useful to :py:func:`http_adapter_from_config` should be prefixed,
    e.g. ``http.pool_connections``, ``http.max_retries``, etc. The ``prefix``
    argument specifies the prefix used. Each key is mapped to a corresponding
    keyword argument on the :py:class:`~requests.adapters.HTTPAdapter`
    constructor.

    Supported keys:

    * ``pool_connections``: The number of connections to cache (default: 10).
    * ``pool_maxsize``: The maximum number of connections to keep in the pool
      (default: 10).
    * ``max_retries``: How many times to retry DNS lookups or connection
      attempts, but never sending data (default: 0).
    * ``pool_block``: Whether the connection pool will block when trying to get
      a connection (default: false).

    Additionally, the rules for Advocate's address filtering can be configured
    with the ``filter`` sub-keys:

    * ``filter.ip_allowlist``: A comma-delimited list of IP addresses (1.2.3.4)
        or CIDR-notation (1.2.3.0/24) ranges that the client can always connect to
        (default: anything not on the local network).
    * ``filter.ip_denylist``: A comma-delimited list of IP addresses or
        CIDR-notation ranges the client may never connect to (default: the local network).
    * ``filter.port_allowlist``: A comma-delimited list of TCP port numbers
        that the client can connect to (default: 80, 8080, 443, 8443, 8000).
    * ``filter.port_denylist``: A comma-delimited list of TCP port numbers that
        the client may never connect to (default: none).
    * ``filter.hostname_denylist``: A comma-delimited list of hostnames that
        the client may never connect to (default: none).
    * ``filter.allow_ipv6``: Should the client be allowed to connect to IPv6
        hosts? (default: false; note that it is tricky to apply filtering rules
        comprehensively to IPv6).

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "pool_connections": config.Optional(config.Integer, default=10),
            "pool_maxsize": config.Optional(config.Integer, default=10),
            "max_retries": config.Optional(config.Integer, default=0),
            "pool_block": config.Optional(config.Boolean, default=False),
            "filter": {
                "ip_allowlist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "ip_denylist": config.Optional(config.TupleOf(ipaddress.ip_network)),
                "port_allowlist": config.Optional(config.TupleOf(int)),
                "port_denylist": config.Optional(config.TupleOf(int)),
                "hostname_denylist": config.Optional(config.TupleOf(config.String)),
                "allow_ipv6": config.Optional(config.Boolean, default=False),
            },
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    if options.pool_connections is not None:
        kwargs.setdefault("pool_connections", options.pool_connections)
    if options.pool_maxsize is not None:
        kwargs.setdefault("pool_maxsize", options.pool_maxsize)
    if options.max_retries is not None:
        kwargs.setdefault("max_retries", options.max_retries)
    if options.pool_block is not None:
        kwargs.setdefault("pool_block", options.pool_block)

    kwargs.setdefault(
        "validator",
        AddrValidator(
            ip_whitelist=options.filter.ip_allowlist,
            ip_blacklist=options.filter.ip_denylist,
            port_whitelist=options.filter.port_allowlist,
            port_blacklist=options.filter.port_denylist,
            hostname_blacklist=options.filter.hostname_denylist,
            allow_ipv6=options.filter.allow_ipv6,
        ),
    )
    return ValidatingHTTPAdapter(**kwargs)
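A minimal usage sketch with hypothetical values; the ``filter.*`` keys configure Advocate's AddrValidator as shown above:

app_config = {
    "http.pool_maxsize": "50",
    "http.max_retries": "2",
    "http.filter.port_allowlist": "80, 443",
    "http.filter.allow_ipv6": "false",
}
adapter = http_adapter_from_config(app_config, prefix="http.")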
Example #11
def pool_from_config(
    app_config: config.RawConfig,
    prefix: str = "memcache.",
    serializer: Optional[Serializer] = None,
    deserializer: Optional[Deserializer] = None,
) -> PooledClient:
    """Make a PooledClient from a configuration dictionary.

    The keys useful to :py:func:`pool_from_config` should be prefixed, e.g.
    ``memcache.endpoint``, ``memcache.max_pool_size``, etc. The ``prefix``
    argument specifies the prefix used to filter keys. Each key is mapped to a
    corresponding keyword argument on the
    :py:class:`~pymemcache.client.base.PooledClient` constructor.

    Supported keys:

    * ``endpoint`` (required): a string representing the host and port of the
        memcached service to connect to, e.g. ``localhost:11211`` or ``127.0.0.1:11211``.
    * ``max_pool_size``: an integer for the maximum pool size to use, by default
        this is ``2147483648``.
    * ``connect_timeout``: how long (as
        :py:func:`~baseplate.lib.config.Timespan`) to wait for a connection to
        memcached server. Defaults to the underlying socket default timeout.
    * ``timeout``: how long (as :py:func:`~baseplate.lib.config.Timespan`) to
        wait for calls on the socket connected to memcache. Defaults to the
        underlying socket default timeout.

    :param app_config: the raw application configuration
    :param prefix: prefix for configuration keys
    :param serializer: function to serialize values to strings suitable
        for being stored in memcached. An example is
        :py:func:`~baseplate.clients.memcache.lib.make_dump_and_compress_fn`.
    :param deserializer: function to convert strings returned from
        memcached to arbitrary objects, must be compatible with ``serializer``.
        An example is :py:func:`~baseplate.clients.memcache.lib.decompress_and_load`.

    :returns: :py:class:`pymemcache.client.base.PooledClient`

    """
    assert prefix.endswith(".")
    parser = config.SpecParser(
        {
            "endpoint": config.Endpoint,
            "max_pool_size": config.Optional(config.Integer, default=None),
            "connect_timeout": config.Optional(config.TimespanWithLegacyFallback, default=None),
            "timeout": config.Optional(config.TimespanWithLegacyFallback, default=None),
            "no_delay": config.Optional(config.Boolean, default=True),
        }
    )
    options = parser.parse(prefix[:-1], app_config)

    return PooledClient(
        server=options.endpoint.address,
        connect_timeout=options.connect_timeout
        and options.connect_timeout.total_seconds(),
        timeout=options.timeout and options.timeout.total_seconds(),
        serializer=serializer,
        deserializer=deserializer,
        no_delay=options.no_delay,
        max_pool_size=options.max_pool_size,
    )
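A minimal usage sketch with hypothetical values, using the default ``memcache.`` prefix:

app_config = {
    "memcache.endpoint": "localhost:11211",
    "memcache.max_pool_size": "100",
    "memcache.timeout": "200 milliseconds",
}
client = pool_from_config(app_config)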
Example #12
def cluster_pool_from_config(
        app_config: config.RawConfig,
        prefix: str = "rediscluster.",
        **kwargs: Any) -> rediscluster.ClusterConnectionPool:
    """Make a ClusterConnectionPool from a configuration dictionary.

    The keys useful to :py:func:`cluster_pool_from_config` should be prefixed, e.g.
    ``rediscluster.url``, ``rediscluster.max_connections``, etc. The ``prefix`` argument
    specifies the prefix used to filter keys.  Each key is mapped to a
    corresponding keyword argument on the :py:class:`rediscluster.ClusterConnectionPool`
    constructor.

    Supported keys:

    * ``url`` (required): a URL like ``redis://localhost/0``.
    * ``max_connections``: an integer maximum number of connections in the pool
    * ``max_connections_per_node``: Boolean, whether max_connections should be applied
        globally (False) or per node (True).
    * ``skip_full_coverage_check``: Skips the check of the cluster-require-full-coverage
        config, useful for clusters without the CONFIG command (like AWS).
    * ``nodemanager_follow_cluster``: Tell the node manager to reuse the last set of
        nodes it was operating on when initializing.
    * ``read_from_replicas``: (Boolean) Whether the client should send all read queries to
        replicas instead of just the primary.
    * ``timeout``: how long to wait for sockets to connect. e.g.
        ``200 milliseconds`` (:py:func:`~baseplate.lib.config.Timespan`)
    * ``track_key_reads_sample_rate``: If greater than zero, the percentage of read requests
        that will be inspected to keep track of hot key usage within Redis.
        Every command inspected will result in a write to a sorted set
        (``baseplate-hot-key-tracker-reads``) for tracking.
    * ``track_key_writes_sample_rate``: If greater than zero, the percentage of write requests
        that will be inspected to keep track of hot key usage within Redis.
        Every command inspected will result in a write to a sorted set
        (``baseplate-hot-key-tracker-writes``) for tracking.

    """
    assert prefix.endswith(".")

    parser = config.SpecParser(
        {
            "url": config.String,
            "max_connections": config.Optional(config.Integer, default=50),
            "max_connections_per_node": config.Optional(config.Boolean, default=False),
            "timeout": config.Optional(config.Timespan, default=None),
            "read_from_replicas": config.Optional(config.Boolean, default=True),
            "skip_full_coverage_check": config.Optional(config.Boolean, default=True),
            "nodemanager_follow_cluster": config.Optional(config.Boolean, default=None),
            "decode_responses": config.Optional(config.Boolean, default=True),
            "track_key_reads_sample_rate": config.Optional(config.Float, default=0),
            "track_key_writes_sample_rate": config.Optional(config.Float, default=0),
        }
    )

    options = parser.parse(prefix[:-1], app_config)

    # We're explicitly setting a default here because of https://github.com/Grokzen/redis-py-cluster/issues/435
    kwargs.setdefault("max_connections", options.max_connections)

    kwargs.setdefault("decode_responses", options.decode_responses)

    if options.nodemanager_follow_cluster is not None:
        kwargs.setdefault("nodemanager_follow_cluster",
                          options.nodemanager_follow_cluster)
    if options.skip_full_coverage_check is not None:
        kwargs.setdefault("skip_full_coverage_check",
                          options.skip_full_coverage_check)
    if options.timeout is not None:
        kwargs.setdefault("timeout", options.timeout.total_seconds())

    if options.read_from_replicas:
        connection_pool = ClusterWithReadReplicasBlockingConnectionPool.from_url(
            options.url, **kwargs)
    else:
        connection_pool = rediscluster.ClusterBlockingConnectionPool.from_url(
            options.url, **kwargs)

    connection_pool.track_key_reads_sample_rate = options.track_key_reads_sample_rate
    connection_pool.track_key_writes_sample_rate = options.track_key_writes_sample_rate

    connection_pool.read_from_replicas = options.read_from_replicas
    connection_pool.skip_full_coverage_check = options.skip_full_coverage_check

    return connection_pool
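A minimal usage sketch with hypothetical values, using the default ``rediscluster.`` prefix:

app_config = {
    "rediscluster.url": "redis://localhost:7000/0",
    "rediscluster.max_connections": "100",
    "rediscluster.timeout": "1 second",
    "rediscluster.read_from_replicas": "false",
}
pool = cluster_pool_from_config(app_config)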