Example No. 1
    def __init__(
        self,
        endpoint: config.EndpointConfiguration,
        size: int = 10,
        max_age: int = 120,
        timeout: int = 1,
        max_connection_attempts: Optional[int] = None,
        max_retries: Optional[int] = None,
        protocol_factory: TProtocolFactory = THeaderProtocol.THeaderProtocolFactory(),
    ):
        if max_connection_attempts and max_retries:
            raise Exception(
                "do not mix max_retries and max_connection_attempts")

        if max_retries:
            warn_deprecated(
                "ThriftConnectionPool's max_retries is now named max_connection_attempts"
            )
            max_connection_attempts = max_retries
        elif not max_connection_attempts:
            max_connection_attempts = 3

        self.endpoint = endpoint
        self.max_age = max_age
        self.retry_policy = RetryPolicy.new(attempts=max_connection_attempts)
        self.timeout = timeout
        self.protocol_factory = protocol_factory

        self.size = size
        self.pool: ProtocolPool = queue.LifoQueue()
        for _ in range(size):
            self.pool.put(None)
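
A minimal usage sketch for this constructor, assuming it belongs to Baseplate's ThriftConnectionPool (as the deprecation message suggests) and that config.Endpoint parses a "host:port" string; all values are illustrative.

from baseplate.lib import config

pool = ThriftConnectionPool(
    endpoint=config.Endpoint("localhost:9090"),
    size=5,
    timeout=1,
    max_connection_attempts=3,  # preferred over the deprecated max_retries
)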
Example No. 2
    def configure_context(self, *args: Any, **kwargs: Any) -> None:  # noqa: F811
        """Add a number of objects to each request's context object.

        Configure and attach multiple clients to the
        :py:class:`~baseplate.RequestContext` in one place. This takes a full
        configuration spec like :py:func:`baseplate.lib.config.parse_config`
        and will attach the specified structure onto the context object for
        each request.

        For example, a configuration like::

            baseplate = Baseplate(app_config)
            baseplate.configure_context({
                "cfg": {
                    "doggo_is_good": config.Boolean,
                },
                "cache": MemcachedClient(),
                "cassandra": {
                    "foo": CassandraClient("foo_keyspace"),
                    "bar": CassandraClient("bar_keyspace"),
                },
            })

        would build a context object that could be used like::

            assert context.cfg.doggo_is_good == True
            context.cache.get("example")
            context.cassandra.foo.execute()

        :param app_config: The raw stringy configuration dictionary.
        :param context_spec: A specification of what the configuration should
            look like.

        """

        if len(args) == 1:
            kwargs["context_spec"] = args[0]
        elif len(args) == 2:
            kwargs["app_config"] = args[0]
            kwargs["context_spec"] = args[1]
        else:
            raise Exception("bad parameters to configure_context")

        if "app_config" in kwargs:
            if self._app_config:
                raise Exception("pass app_config to the constructor or this method but not both")

            warn_deprecated(
                "Passing configuration to configure_context is deprecated in "
                "favor of passing it to the Baseplate constructor"
            )
            app_config = kwargs["app_config"]
        else:
            app_config = self._app_config
        context_spec = kwargs["context_spec"]

        cfg = config.parse_config(app_config, context_spec)
        self._context_config.update(cfg)
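
A short sketch of the two call forms the argument handling above accepts; the spec mirrors the docstring example, and the two-positional-argument form triggers the deprecation warning.

# Preferred: configuration goes to the constructor; only the spec is passed here.
baseplate = Baseplate(app_config)
baseplate.configure_context({"cfg": {"doggo_is_good": config.Boolean}})

# Deprecated: configuration passed directly to configure_context.
legacy = Baseplate()
legacy.configure_context(app_config, {"cfg": {"doggo_is_good": config.Boolean}})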
Example No. 3
def consume(
    baseplate: Baseplate,
    exchange: Exchange,
    connection: Connection,
    queue_name: str,
    routing_keys: Sequence[str],
    handler: Handler,
) -> NoReturn:
    """Create a long-running process to consume messages from a queue.

    A queue with name ``queue_name`` is created and bound to the
    ``routing_keys`` so messages published to the ``routing_keys`` are routed
    to the queue.

    Next, the process registers a consumer that receives messages from
    the queue and feeds them to the ``handler``.

    The ``handler`` function must take 3 arguments:

    * ``context``: a baseplate context
    * ``message_body``: the text body of the message
    * ``message``: :py:class:`kombu.message.Message`

    The consumer will automatically ``ack`` each message after the handler
    method exits. If there is an error in processing and the message must be
    retried, the handler should raise an exception to crash the process. This
    will prevent the ``ack``, and the message will be re-queued at the head of
    the queue.

    :param baseplate: A baseplate instance for the service.
    :param exchange: The kombu ``Exchange`` to bind the queue to.
    :param connection: The kombu ``Connection`` to the message broker.
    :param queue_name: The name of the queue.
    :param routing_keys: List of routing keys.
    :param handler: The handler method.

    """
    warn_deprecated(
        "baseplate.frameworks.queue_consumer is deprecated and will be removed "
        "in the next major release.  You should migrate your consumers to use "
        "baseplate.server.queue_consumer.\n"
        "https://baseplate.readthedocs.io/en/stable/api/baseplate/frameworks/queue_consumer/deprecated.html"
    )
    queues = []
    for routing_key in routing_keys:
        queues.append(
            Queue(name=queue_name, exchange=exchange, routing_key=routing_key))

    logger.info("registering %s as a handler for %r", handler.__name__, queues)
    kombu_consumer = KombuConsumer.new(connection, queues)

    logger.info("waiting for messages")
    while True:
        context = baseplate.make_context_object()
        with baseplate.make_server_span(context, queue_name) as span:
            message = kombu_consumer.get_message(span)
            handler(context, message.body, message)
            message.ack()
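
A hedged sketch of wiring a handler with the (context, message_body, message) signature described above into consume(); the kombu exchange/connection values and the process_message/do_work names are illustrative.

from kombu import Connection, Exchange

def process_message(context, message_body, message):
    # Raise an exception to crash the process and re-queue the message;
    # returning normally lets the consumer ack it.
    do_work(message_body)  # hypothetical application function

consume(
    baseplate=baseplate,
    exchange=Exchange("amq.topic", type="topic"),
    connection=Connection("amqp://guest:guest@rabbitmq:5672//"),
    queue_name="example_queue",
    routing_keys=["example.event"],
    handler=process_message,
)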
Example No. 4
def make_client(
    service_name: str,
    tracing_endpoint: Optional[config.EndpointConfiguration] = None,
    tracing_queue_name: Optional[str] = None,
    max_span_queue_size: int = 50000,
    num_span_workers: int = 5,
    span_batch_interval: float = 0.5,
    num_conns: int = 100,
    sample_rate: float = 0.1,
    log_if_unconfigured: bool = True,
) -> TracingClient:
    """Create and return a tracing client based on configuration options.

    This client can be used by the :py:class:`TraceBaseplateObserver`.

    :param service_name: The name for the service this observer
        is registered to.
    :param tracing_endpoint: Destination to record span data.
    :param tracing_queue_name: POSIX queue name for reporting spans.
    :param num_conns: pool size for remote recorder connection pool.
    :param max_span_queue_size: span processing queue limit.
    :param num_span_workers: number of worker threads for span processing.
    :param span_batch_interval: wait time for span processing in seconds.
    :param sample_rate: Fraction (0.0 to 1.0) of unsampled requests to record
        traces for.
    """
    recorder: Recorder
    if tracing_queue_name:
        recorder = SidecarRecorder(tracing_queue_name)
    elif tracing_endpoint:
        warn_deprecated(
            "In-app trace publishing is deprecated in favor of the sidecar model."
        )
        remote_addr = str(tracing_endpoint.address)
        recorder = RemoteRecorder(
            remote_addr,
            num_conns=num_conns,
            max_queue_size=max_span_queue_size,
            num_workers=num_span_workers,
            batch_wait_interval=span_batch_interval,
        )
    elif log_if_unconfigured:
        recorder = LoggingRecorder(
            max_queue_size=max_span_queue_size,
            num_workers=num_span_workers,
            batch_wait_interval=span_batch_interval,
        )
    else:
        recorder = NullRecorder(
            max_queue_size=max_span_queue_size,
            num_workers=num_span_workers,
            batch_wait_interval=span_batch_interval,
        )

    return TracingClient(service_name, sample_rate, recorder)
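
A minimal sketch of the recommended sidecar path: with tracing_queue_name set, the branches above select the SidecarRecorder. The queue name and sample rate below are illustrative.

client = make_client(
    service_name="my_service",
    tracing_queue_name="main",
    sample_rate=0.01,
)
# The resulting client can then be handed to the TraceBaseplateObserver
# mentioned in the docstring above.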
Example No. 5
    def get_vault_token(self) -> str:
        """Return a Vault authentication token.

        The token will have policies attached based on the current EC2 server's
        Vault role. This is only necessary if talking directly to Vault.

        """
        warn_deprecated(
            "get_vault_token is deprecated and will be removed in v3.0.")
        data, _ = self._get_data()
        return data["vault"]["token"]
Example No. 6
    def __init__(
        self,
        baseplate: Baseplate,
        trust_trace_headers: Optional[bool] = None,
        edge_context_factory: Optional[EdgeRequestContextFactory] = None,
        header_trust_handler: Optional[HeaderTrustHandler] = None,
    ):
        self.baseplate = baseplate
        self.trust_trace_headers = bool(trust_trace_headers)
        if trust_trace_headers is not None:
            warn_deprecated(
                "setting trust_trace_headers is deprecated in favor of using"
                " a header trust handler."
            )
        self.edge_context_factory = edge_context_factory

        if header_trust_handler:
            self.header_trust_handler = header_trust_handler
        else:
            self.header_trust_handler = StaticTrustHandler(trust_headers=self.trust_trace_headers)
Example No. 7
    def _get_experiment(self, name: str) -> Optional[Experiment]:
        if name in self._global_cache:
            return self._global_cache[name]

        if self._cfg_data is None:
            warn_deprecated("config_watcher will be removed in Baseplate 2.0.")
            self._cfg_data = self._get_config()

        if name not in self._cfg_data:
            logger.info("Experiment <%r> not found in experiment config", name)
            return None

        try:
            experiment = parse_experiment(self._cfg_data[name])
            self._global_cache[name] = experiment
            return experiment
        except Exception as err:
            logger.error("Invalid configuration for experiment %s: %s", name,
                         err)
            return None
Example No. 8
    def from_config(
            cls, app_config: config.RawConfig) -> "TimeoutBaseplateObserver":
        cfg = config.parse_config(
            app_config,
            {
                "server_timeout": {
                    "default":
                    config.Optional(config.TimespanOrInfinite, default=None),
                    "debug":
                    config.Optional(config.Boolean, default=False),
                    "by_endpoint":
                    config.DictOf(config.TimespanOrInfinite),
                }
            },
        )

        if cfg.server_timeout.default is None:
            warn_deprecated(
                "No server_timeout.default configured. Defaulting to no timeout. "
                "Set the default timeout to 'infinite' or a timespan like '2 seconds'. "
                "This will become mandatory in Baseplate.py 2.0.")
            cfg.server_timeout.default = config.InfiniteTimespan

        return cls(cfg.server_timeout)
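
A sketch of raw settings matching the spec above, assuming Baseplate's usual flat dotted keys; the values and endpoint name are illustrative, and "infinite" relies on TimespanOrInfinite accepting that literal.

app_config = {
    "server_timeout.default": "2 seconds",
    "server_timeout.debug": "false",
    "server_timeout.by_endpoint.is_healthy": "infinite",
}
observer = TimeoutBaseplateObserver.from_config(app_config)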
Example No. 9
    def __init__(self, secret_key: bytes):
        warn_deprecated(
            "MessageSigner is deprecated in favor of the top-level "
            "make_signature and validate_signature functions which "
            "accept versioned secrets from the secret store.")
        self.secret = VersionedSecret.from_simple_secret(secret_key)
Example No. 10
    def get_vault_url(self) -> str:
        """Return the URL for accessing Vault directly."""
        warn_deprecated(
            "get_vault_url is deprecated and will be removed in v3.0.")
        data, _ = self._get_data()
        return data["vault"]["url"]
Example No. 11
def parse_experiment(config: Dict[str, str]) -> Experiment:
    """Parse an experiment config dict and return an appropriate Experiment class.

    The config dict is expected to have the following values:

        * **id**: Integer experiment ID, should be unique for each experiment.
        * **name**: String experiment name, should be unique for each
          experiment.
        * **owner**: The group or individual that owns this experiment.
        * **version**: String to identify the specific version of the
          experiment.
        * **start_ts**: A float of seconds since the epoch of date and time
          when you want the experiment to start.  If an experiment has not been
          started yet, it is considered disabled.
        * **stop_ts**: A float of seconds since the epoch of date and time when
          you want the experiment to stop.  Once an experiment is stopped, it
          is considered disabled.
        * **type**: String specifying the type of experiment to run.  If this
          value is not recognized, the experiment will be considered disabled.
        * **experiment**: The experiment config dict for the specific type of
          experiment.  The format of this is determined by the specific
          experiment type.
        * **enabled**:  (Optional) If set to False, the experiment will be
          disabled and calls to experiment.variant will always return None and
          will not log bucketing events to the event pipeline. Defaults to
          True.
        * **global_override**: (Optional) If this is set, calls to
          experiment.variant will always return the override value and will not
          log bucketing events to the event pipeline.

    :param config: Configuration dict for the experiment you wish to run.
    :return: A subclass of :py:class:`Experiment` for the given experiment
        type.
    """
    experiment_type = config.get("type")
    if experiment_type:
        experiment_type = experiment_type.lower()
    experiment_id = config.get("id")
    if not isinstance(experiment_id, int):
        raise TypeError("Integer id must be provided for experiment.")
    name = config.get("name")
    owner = config.get("owner")
    start_ts = config.get("start_ts")
    stop_ts = config.get("stop_ts")
    if start_ts is None or stop_ts is None:
        if "expires" in config:
            warn_deprecated(
                "The 'expires' field in experiment %s is deprecated, you should "
                "use 'start_ts' and 'stop_ts'." % name
            )
            start_ts = time.time()
            expires = datetime.strptime(config["expires"], ISO_DATE_FMT)
            epoch = datetime(1970, 1, 1)
            stop_ts = (expires - epoch).total_seconds()
        else:
            raise ValueError(
                f"Invalid config for experiment {name}, missing start_ts and/or stop_ts."
            )

    if "version" in config:
        version = config["version"]
    else:
        warn_deprecated(
            "The 'version' field is not in experiment %s.  This field will be "
            "required in the future." % name
        )
        version = None

    now = time.time()

    enabled = config.get("enabled", True)
    if now < start_ts or now > stop_ts:
        enabled = False

    if not enabled and experiment_type in legacy_type_class_map:
        return ForcedVariantExperiment(None)

    experiment_config = config["experiment"]

    if "global_override" in config:
        # We want to check if "global_override" is in config rather than
        # checking config.get("global_override") because global_override = None
        # is a valid setting.
        override = config.get("global_override")
        return ForcedVariantExperiment(override)

    if experiment_type in legacy_type_class_map:
        experiment_class = legacy_type_class_map[experiment_type]
        return experiment_class.from_dict(
            id=experiment_id, name=name, owner=owner, version=version, config=experiment_config
        )

    if experiment_type in simple_type_class_list:
        return SimpleExperiment.from_dict(
            id=experiment_id,
            name=name,
            owner=owner,
            start_ts=start_ts,
            stop_ts=stop_ts,
            enabled=enabled,
            config=experiment_config,
            variant_type=experiment_type,
        )

    logger.warning(
        "Found an experiment <%s:%s> with an unknown experiment type <%s> "
        "that is owned by <%s>. Please clean up.",
        experiment_id,
        name,
        experiment_type,
        owner,
    )
    return ForcedVariantExperiment(None)
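
A hedged sketch of the documented top-level fields. The inner "experiment" dict's format depends on the experiment type and is left empty here; with a type that is not registered in either type map, the code above logs a warning and returns a disabled ForcedVariantExperiment.

experiment = parse_experiment({
    "id": 1,
    "name": "my_experiment",
    "owner": "my_team",
    "version": "1",
    "start_ts": 1700000000.0,
    "stop_ts": 1800000000.0,
    "type": "some_unregistered_type",  # not in legacy_type_class_map or simple_type_class_list
    "enabled": True,
    "experiment": {},  # format is determined by the experiment type
})
# -> ForcedVariantExperiment(None), i.e. the experiment is treated as disabled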
Example No. 12
    def variant(
        self,
        experiment_name: Optional[str] = None,
        user: Optional[User] = None,
        bucketing_event_override: Optional[bool] = None,
        name: Optional[str] = None,  # DEPRECATED
        **kwargs: str,
    ) -> Optional[str]:
        r"""Return which variant, if any, is active.

        If a variant is active, a bucketing event will be logged to the event
        pipeline unless any one of the following conditions are met:

        1. bucketing_event_override is set to False.
        2. The experiment specified by "name" explicitly disables bucketing
           events.
        3. We have already logged a bucketing event for the value specified by
           ``experiment.get_unique_id(\*\*kwargs)`` within the current
           request.

        Since checking the status of an experiment will fire a bucketing event,
        it is best to only check the variant when you are making the decision
        that will expose the experiment to the user.  If you absolutely must
        check the status of an experiment before you are sure that the
        experiment will be exposed to the user, you can use
        `bucketing_event_override` to disable bucketing events for that check.

        :param experiment_name: Name of the experiment you want to run.
        :param name: DEPRECATED - use experiment_name instead
        :param user: User object for the user you want to check the experiment
            variant for.  If you set user, the experiment parameters for that user
            ("user_id", "logged_in", and "user_roles") will be extracted and added
            to the inputs to the call to Experiment.variant.  The user's
            event_fields will also be extracted and added to the bucketing event if
            one is logged.  It is recommended that you provide a value for user
            rather than setting the user parameters manually in ``kwargs``.
        :param bucketing_event_override: Set if you need to override the
            default behavior for sending bucketing events.  This parameter should
            be set sparingly as it breaks the assumption that you will fire a
            bucketing event when you first check the state of an experiment.  If
            set to False, will never send a bucketing event.  If set to None, no
            override will be applied.  Set to None by default.  Note that setting
            bucketing_event_override to True has no effect; it will behave the same
            as when it is set to None.
        :param kwargs:  Arguments that will be passed to experiment.variant to
            determine bucketing, targeting, and overrides. These values will also
            be passed to the logger.

        :return: Variant name if a variant is active, None otherwise.

        .. versionchanged:: 1.5
            ``name`` was renamed to ``experiment_name``
        """
        experiment_name = experiment_name or name
        assert experiment_name
        if name:
            warn_deprecated(
                f"The 'name' parameter on 'variant' method is deprecated, use 'experiment_name' instead. (name={name})"
            )
        experiment = self._get_experiment(experiment_name)

        if experiment is None:
            return None

        inputs = dict(kwargs)

        if user:
            inputs.update(user.event_fields())

        variant = experiment.variant(**inputs)

        bucketing_id = experiment.get_unique_id(**inputs)

        do_log = True

        if not bucketing_id:
            do_log = False

        if variant is None:
            do_log = False

        if bucketing_event_override is False:
            do_log = False

        if bucketing_id and bucketing_id in self._already_bucketed:
            do_log = False

        do_log = do_log and experiment.should_log_bucketing()

        if do_log:
            assert bucketing_id
            self._event_logger.log(
                experiment=experiment,
                variant=variant,
                user_id=inputs.get("user_id"),
                logged_in=inputs.get("logged_in"),
                cookie_created_timestamp=inputs.get(
                    "cookie_created_timestamp"),
                app_name=inputs.get("app_name"),
                event_type=EventType.BUCKET,
                inputs=inputs,
                span=self._span,
            )
            self._already_bucketed.add(bucketing_id)

        return variant
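
A short usage sketch, assuming the experiments client is attached to the request context as context.experiments; the experiment name, inputs, and variant names are illustrative.

variant = context.experiments.variant(
    "my_experiment",
    user_id="t2_example",
    logged_in=True,
)
if variant == "treatment":
    enable_new_flow()  # hypothetical
else:
    enable_old_flow()  # hypothetical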
Example No. 13
    def configure_observers(
        self, app_config: Optional[config.RawConfig] = None, module_name: Optional[str] = None
    ) -> None:
        """Configure diagnostics observers based on application configuration.

        This installs all the currently supported observers that have settings
        in the configuration file.

        See :py:mod:`baseplate.observers` for the configuration settings
        available for each observer.

        :param app_config: The application configuration which should have
            settings for the observers. If not specified, the config must be passed
            to the Baseplate() constructor.
        :param module_name: Name of the root package of the application. If not specified,
            will be guessed from the package calling this function.

        """
        skipped = []

        if app_config:
            if self._app_config:
                raise Exception("pass app_config to the constructor or this method but not both")

            warn_deprecated(
                "Passing configuration to configure_observers is deprecated in "
                "favor of passing it to the Baseplate constructor"
            )
        else:
            app_config = self._app_config

        self.configure_logging()

        if gevent.monkey.is_module_patched("socket"):
            # pylint: disable=cyclic-import
            from baseplate.observers.timeout import TimeoutBaseplateObserver

            timeout_observer = TimeoutBaseplateObserver.from_config(app_config)
            self.register(timeout_observer)
        else:
            skipped.append("timeout")
        if "metrics.tagging" in app_config:
            if "metrics.namespace" in app_config:
                raise ValueError("metrics.namespace not allowed with metrics.tagging")
            from baseplate.lib.metrics import metrics_client_from_config

            metrics_client = metrics_client_from_config(app_config)
            self.configure_tagged_metrics(metrics_client)
        elif "metrics.namespace" in app_config:
            from baseplate.lib.metrics import metrics_client_from_config

            metrics_client = metrics_client_from_config(app_config)
            self.configure_metrics(metrics_client)
        else:
            skipped.append("metrics")

        if "tracing.service_name" in app_config:
            from baseplate.observers.tracing import tracing_client_from_config

            tracing_client = tracing_client_from_config(app_config)
            self.configure_tracing(tracing_client)
        else:
            skipped.append("tracing")

        if "sentry.dsn" in app_config:
            from baseplate.observers.sentry import error_reporter_from_config

            if module_name is None:
                module_name = get_calling_module_name()

            error_reporter = error_reporter_from_config(app_config, module_name)
            self.configure_error_reporting(error_reporter)
        else:
            skipped.append("error_reporter")

        if skipped:
            logger.debug(
                "The following observers are unconfigured and won't run: %s", ", ".join(skipped)
            )
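
A sketch of settings that would activate the metrics, tracing, and error-reporting branches above; the values are placeholders, and each observer's own parser may require further keys (for example a metrics endpoint) not shown in this snippet.

app_config = {
    "metrics.namespace": "my_service",
    "tracing.service_name": "my_service",
    "sentry.dsn": "https://public_key@sentry.example.com/1",
}
baseplate = Baseplate(app_config)
baseplate.configure_observers()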
Example No. 14
    @property
    def shutting_down(self) -> bool:
        warn_deprecated(
            "SERVER_STATE.shutting_down is deprecated in favor of SERVER_STATE.state"
        )
        return self.state == ServerLifecycle.SHUTTING_DOWN