Example #1
    def test_comma_separated_list_empty(self):
        self.create_fixture(typed(comma_separated_list, mock_value=""))
        loader = load_from_dict()

        metadata = Metadata("test", testing=True)
        config = configure(self.registry.defaults, metadata, loader)
        assert_that(config, has_entries(foo=has_entries(value=[])))
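
For context, the `comma_separated_list` config type turns a comma-delimited string into a list, so an empty mock value yields an empty list. A rough stand-in for that behaviour (not microcosm's actual implementation; the helper name below is illustrative):

def split_comma_separated(value):
    # Rough stand-in for microcosm's comma_separated_list config type:
    # split a comma-delimited string; an empty string yields an empty list.
    if not value:
        return []
    return [item.strip() for item in str(value).split(",")]

assert split_comma_separated("") == []
assert split_comma_separated("a, b,c") == ["a", "b", "c"]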
Example #2
    def test_typed_optional(self):
        self.create_fixture(typed(int))
        loader = load_from_dict()

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(config, has_entries(
            foo=has_entries(
                value=None,
            ),
        ))
Example #3
    def test_nullable_null_default(self):
        self.create_fixture(value=typed(
            int,
            default_value=None,
            nullable=True,
        ))
        loader = load_from_dict()

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(config, has_entries(foo=has_entries(value=None)))
Example #4
    def test_nullable(self):
        self.create_fixture(value=typed(
            int,
            default_value=0,
            nullable=True,
            mock_value=None,
        ))
        loader = load_from_dict()

        metadata = Metadata("test", testing=True)
        config = configure(self.registry.defaults, metadata, loader)
        assert_that(config, has_entries(foo=has_entries(value=None)))
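
Examples #1 and #4 construct their own Metadata("test", testing=True) because mock_value is only applied when the metadata marks the graph as testing. A minimal end-to-end sketch of that behaviour, using an illustrative "demo" binding that is not taken from the tests above:

from microcosm.api import binding, create_object_graph, defaults, typed


@binding("demo")
@defaults(
    # default_value is used normally; mock_value wins when metadata.testing is set
    retries=typed(int, default_value=3, mock_value=0),
)
def configure_demo(graph):
    return graph.config.demo.retries


graph = create_object_graph(name="example", testing=True)
graph.use("demo")
assert graph.demo == 0  # the mock value applies because testing=True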
Example #5
    def test_typed_converted(self):
        self.create_fixture(typed(int))
        loader = load_from_dict(
            foo=dict(
                value="1",
            ),
        )

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(config, has_entries(
            foo=has_entries(
                value=1,
            ),
        ))
Example #6
    def test_boolean_typed_converted(self):
        self.create_fixture(
            bar=typed(bool, default_value=None),
            baz=typed(bool, default_value=None),
            qux=typed(bool, default_value=None),
            kog=typed(bool, default_value=None),
        )
        loader = load_from_dict(foo=dict(
            bar="False",
            baz="True",
            qux="false",
            kog="true",
        ), )

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(
            config,
            has_entries(foo=has_entries(
                bar=False,
                baz=True,
                qux=False,
                kog=True,
            ), ))
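
The assertions above rely on microcosm's `boolean` config type coercing string values case-insensitively. A rough approximation of that coercion (not the library's actual code):

def to_boolean(value):
    # Approximate, case-insensitive string-to-bool coercion; the real
    # `boolean` config type may treat other inputs differently.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("true", "1", "yes")


assert to_boolean("False") is False
assert to_boolean("true") is True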
Example #7
                # expected behavior (e.g. 404)
                if not request_info.options.log_as_debug:
                    logger.info(request_info.to_dict())
                else:
                    logger.debug(request_info.to_dict())

        # Setting request state for future middleware functions
        request.state.request_info = request_info

        return response

    return partial(audit_request, graph, options)


@defaults(
    include_request_body_status=typed(
        type=int, default_value=DEFAULT_INCLUDE_REQUEST_BODY_STATUS),
    include_response_body_status=typed(
        type=int, default_value=DEFAULT_INCLUDE_RESPONSE_BODY_STATUS),
    include_path=typed(type=boolean, default_value=False),
    include_query_string=typed(type=boolean, default_value=False),
    log_as_debug=typed(type=boolean, default_value=False),
)
def configure_audit_middleware(graph):
    """
    Configure audit middleware

    """
    options = AuditOptions(
        include_request_body_status=graph.config.audit_middleware.
        include_request_body_status,
        include_response_body_status=graph.config.audit_middleware.
Example #8
    def test_false_default(self):
        self.create_fixture(value=typed(bool, default_value=False))
        loader = load_from_dict()

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(config, has_entries(foo=has_entries(value=False)))
        resource_dict = graph.config.sns_topic_arns.get(lifecycle_change, {})
        for resource_name, topic in iter_topic_mappings(resource_dict):
            media_type = make_media_type(resource_name, lifecycle_change)
            sns_topic_arns[media_type] = topic

    return sns_topic_arns


@defaults(
    profile_name=None,
    region_name=None,
    endpoint_url=None,
    mock_sns=True,
    skip=None,
    # the size used to determine batching in the deferred batch producer
    deferred_batch_size=typed(int, default_value=100),
    # SNS endpoint timeout configuration
    connect_timeout=typed(int, default_value=60),
    read_timeout=typed(int, default_value=60),
)
def configure_sns_producer(graph):
    """
    Configure an SNS producer.

    The SNS Producer requires the following collaborators:
        - Opaque from microcosm.opaque for capturing context information
        - an AWS SNS client, e.g. from boto.
        - pubsub message codecs: see tests for examples.
        - sns topic arns: see tests for examples.

    """
Example #10
        resource_dict = graph.config.sns_topic_arns.get(lifecycle_change, {})
        for resource_name, topic in iter_topic_mappings(resource_dict):
            media_type = make_media_type(resource_name, lifecycle_change)
            sns_topic_arns[media_type] = topic

    return sns_topic_arns


@defaults(
    profile_name=None,
    region_name=None,
    endpoint_url=None,
    mock_sns=True,
    skip=None,
    # the size used to determine batching in the deferred batch producer
    deferred_batch_size=typed(int, default_value=100),
)
def configure_sns_producer(graph):
    """
    Configure an SNS producer.

    The SNS Producer requires the following collaborators:
        - Opaque from microcosm.opaque for capturing context information
        - an AWS SNS client, e.g. from boto.
        - pubsub message codecs: see tests for examples.
        - sns topic arns: see tests for examples.

    """
    if graph.metadata.testing:
        from unittest.mock import MagicMock
Example #11
                    tags=tags + [
                        f"classifier:{normalize_status_code(request_info.status_code)}"
                    ],
                )

            if request_info.timing.get("elapsed_time"):
                elapsed_ms = request_info.timing["elapsed_time"]
                graph.metrics.histogram(
                    name_for(key),
                    elapsed_ms,
                    tags=tags,
                )

        return response

    return partial(route_metrics, graph)


@defaults(
    enabled=typed(boolean, default_value=True), )
def configure_route_metrics(graph):
    """
    Configure route metrics

    """
    metrics = get_metrics(graph)
    enabled = bool(metrics and metrics.host != "localhost"
                   and graph.config.route_metrics.enabled)
    if enabled:
        graph.app.middleware("http")(create_route_metrics(graph))
Example #12
    session = Session(profile_name=profile_name)
    return session.client(
        "sqs",
        endpoint_url=endpoint_url,
        region_name=region_name,
    )


@defaults(
    endpoint_url=None,
    profile_name=None,
    region_name=None,
    # backoff policy
    backoff_policy="NaiveBackoffPolicy",
    # SQS will not return more than ten messages at a time
    limit=typed(int, default_value=10),
    # SQS will only return a few messages at a time unless long polling is enabled (>0)
    wait_seconds=typed(int, default_value=1),
    # On error, change the visibility timeout when nacking
    message_retry_visibility_timeout_seconds=typed(int, default_value=5),
)
def configure_sqs_consumer(graph):
    """
    Configure an SQS consumer.

    """
    sqs_queue_url = graph.config.sqs_consumer.sqs_queue_url

    if graph.metadata.testing or sqs_queue_url == "test":
        from unittest.mock import MagicMock
        sqs_client = MagicMock()
Example #13
"""
Neo4J driver factory.

"""
from logging import getLogger, INFO
from microcosm.api import defaults, typed
from microcosm.config.types import boolean

from neo4j.v1 import GraphDatabase


@defaults(
    # NB: some features are not available unless enabled
    enterprise=typed(boolean, default_value=False),
    password="******",
    uri=None,
    username="******",
)
def configure_neo4j_driver(graph):
    if graph.metadata.testing:
        logger = getLogger("neobolt")
        logger.level = INFO

        default_uri = "bolt://localhost:17687"
    else:
        default_uri = "bolt://localhost:7687"

    return GraphDatabase.driver(
        graph.config.neo4j.uri or default_uri,
        auth=(
            graph.config.neo4j.username,
Example #14
Routing registration support.

Intercepts Flask's normal route registration to inject conventions.

"""
from flask_cors import cross_origin
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import context_logger


@defaults(
    converters=[
        "uuid",
    ],
    enable_audit=typed(boolean, default_value=True),
    enable_basic_auth=typed(boolean, default_value=False),
    enable_context_logger=typed(boolean, default_value=True),
    enable_cors=typed(boolean, default_value=True),
)
def configure_route_decorator(graph):
    """
    Configure a flask route decorator that operates on `Operation` and `Namespace` objects.

    By default, enables CORS support, assuming that service APIs are not exposed
    directly to browsers except when using API browsing tools.

    Usage:

        @graph.route(ns.collection_path, Operation.Search, ns)
        def search_foo():
Example #17
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm.errors import NotBoundError

from microcosm_pubsub.result import MessageHandlingResult, MessageHandlingResultType


@defaults(enabled=typed(boolean, default_value=True))
class PubSubSendMetrics:
    """
    Send metrics relating to a single MessageHandlingResult

    """
    def __init__(self, graph):
        self.metrics = self.get_metrics(graph)
        self.enabled = bool(self.metrics and self.metrics.host != "localhost"
                            and graph.config.pubsub_send_metrics.enabled)

    def get_metrics(self, graph):
        """
        Fetch the metrics client from the graph.

        Metrics will be disabled if not configured.

        """
        try:
            return graph.metrics
        except NotBoundError:
            return None

    def __call__(self, result: MessageHandlingResult):
from microcosm.api import defaults, typed
from microcosm.config.types import boolean, comma_separated_list

from microcosm_caching.memcached import MemcachedCache


@defaults(
    enabled=typed(boolean, default_value=False),
    servers=typed(comma_separated_list, default_value="localhost:11211"),
    connect_timeout=typed(float, default_value=3.0),
    read_timeout=typed(float, default_value=2.0),
    ignore_exc=typed(boolean, default_value=False),
)
def configure_resource_cache(graph):
    """
    Configure the resource cache which will be exposed via the
    microcosm application graph.

    """
    if not graph.config.resource_cache.enabled:
        return None

    kwargs = dict(
        servers=parse_server_config(graph.config.resource_cache.servers),
        connect_timeout=graph.config.resource_cache.connect_timeout,
        read_timeout=graph.config.resource_cache.read_timeout,
        ignore_exc=graph.config.resource_cache.ignore_exc,
    )

    if graph.metadata.testing:
        kwargs.update(dict(testing=True))
from microcosm.api import defaults, typed

from microcosm_fastapi.conventions.health.models import Health
from microcosm_fastapi.conventions.health.resources import HealthSchema


@defaults(
    include_build_info=typed(bool, default_value=True),
)
def configure_health(graph):
    """
    Mount the health endpoint to the graph

    """
    health_container = Health(
        graph, graph.config.health_convention.include_build_info)

    @graph.app.get("/api/health")
    def configure_health_endpoint(full: bool = False) -> HealthSchema:
        response_data = health_container.to_object(full=full)

        if not response_data.ok:
            raise

        return response_data

    return health_container
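
A hypothetical way to exercise the endpoint registered above, assuming `graph.app` is an ordinary FastAPI application on an already-built object graph:

from fastapi.testclient import TestClient

# Hypothetical usage; `graph` is assumed to be an object graph with the
# health convention above wired in.
client = TestClient(graph.app)
response = client.get("/api/health")
print(response.status_code, response.json())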
Example #20
from datetime import datetime, timedelta
from logging import Logger
from tracemalloc import start, take_snapshot

from microcosm.api import binding, defaults, typed
from microcosm.config.types import boolean


@binding("memory_profiler")
@defaults(
    enabled=typed(boolean, False),
    report_size_lines=typed(int, 10),
    sampling_interval_min=typed(int, 10),
)
class MemoryProfiler:
    logger: Logger

    def __init__(self, graph):
        self.enabled = graph.config.memory_profiler.enabled
        self.report_size_lines = graph.config.memory_profiler.report_size_lines
        self.logger = graph.logger

        self.sampling_interval_min = graph.config.memory_profiler.sampling_interval_min
        self.last_sampling_time_delta = timedelta(
            minutes=self.sampling_interval_min)

        self.last_sampling_time = datetime.now()

        if not self.enabled:
            self.logger.info(
                "Skipping initialization because memory profiling is not enabled!"
                kwargs["response_model"] = fn.__annotations__["return"]
        except AttributeError:
            pass
        return kwargs

    def inject_default_response(self, kwargs):
        if kwargs.get("responses", None):
            kwargs["responses"]["default"] = {"model": ErrorSchema}
        else:
            kwargs["responses"] = {"default": {"model": ErrorSchema}}

        return kwargs


@defaults(
    port=typed(int, default_value=5000),
    host="127.0.0.1",
)
def configure_fastapi(graph):
    # Docs use 3rd-party dependencies by default - if documentation is
    # desired by client callers, use `graph.use("docs")`, bundled with
    # microcosm-fastapi. This hook provides a mirror of the default
    # docs/redoc pages, but hosted locally.
    app = FastAPIWrapper(
        port=graph.config.app.port,
        debug=graph.metadata.debug,
        docs_url=None,
        redoc_url=None,
    )

    # Request_context is used for logging purposes
from microcosm.api import defaults, typed
from microcosm_logging.decorators import logger
from microcosm_logging.timing import elapsed_time
from microcosm_pubsub.dispatcher import SQSMessageDispatcher
from microcosm_pubsub.result import MessageHandlingResultType

from microcosm_fastapi.pubsub.result import MessageHandlingResultAsync

PUBLISHED_KEY = "X-Request-Published"


@logger
@defaults(
    # Number of failed attempts after which the message stops being processed
    message_max_processing_attempts=typed(int, default_value=None),
    # Number of messages to process concurrently within the same run loop
    message_max_concurrent_operations=typed(int, default_value=5),
)
class SQSMessageDispatcherAsync(SQSMessageDispatcher):
    def __init__(self, graph):
        super().__init__(graph)

        self.max_processing_attempts = (
            graph.config.sqs_message_dispatcher_async.
            message_max_processing_attempts)
        self.max_concurrent_operations = (
            graph.config.sqs_message_dispatcher_async.
            message_max_concurrent_operations)

    def handle_batch(self, bound_handlers) -> List[MessageHandlingResultAsync]:
"""
Message context.

"""
from typing import Dict

from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger

from microcosm_pubsub.constants import TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage


@defaults(
    enable_ttl=typed(boolean, default_value=True),
    initial_ttl=typed(int, default_value=32),
)
@logger
class SQSMessageContext:
    """
    Factory for per-message contexts.

    """
    def __init__(self, graph):
        self.enable_ttl = graph.config.sqs_message_context.enable_ttl
        self.initial_ttl = graph.config.sqs_message_context.initial_ttl

    def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]:
        """
        Create a new context from a message.
from typing import List

from inflection import titleize
from microcosm.api import defaults, typed
from microcosm_logging.decorators import context_logger, logger
from microcosm_logging.timing import elapsed_time

from microcosm_pubsub.context import TTL_KEY
from microcosm_pubsub.errors import IgnoreMessage, TTLExpired, SkipMessage
from microcosm_pubsub.result import MessageHandlingResult


@logger
@defaults(
    # Number of failed attempts after which the message stops being processed
    message_max_processing_attempts=typed(int, default_value=None)
)
class SQSMessageDispatcher:
    """
    Dispatch batches of SQSMessages to handler functions.

    """
    def __init__(self, graph):
        self.opaque = graph.opaque
        self.sqs_consumer = graph.sqs_consumer
        self.sqs_message_context = graph.sqs_message_context
        self.sqs_message_handler_registry = graph.sqs_message_handler_registry
        self.send_metrics = graph.pubsub_send_metrics
        self.max_processing_attempts = graph.config.sqs_message_dispatcher.message_max_processing_attempts

    def handle_batch(self, bound_handlers) -> List[MessageHandlingResult]:
"""
Message context.

"""
from typing import Dict

from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger

from microcosm_pubsub.constants import RECEIPT_HANDLE_KEY, TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage


@defaults(
    enable_ttl=typed(boolean, default_value=True),
    initial_ttl=typed(int, default_value=32),
)
@logger
class SQSMessageContext:
    """
    Factory for per-message contexts.

    """
    def __init__(self, graph):
        self.enable_ttl = graph.config.sqs_message_context.enable_ttl
        self.initial_ttl = graph.config.sqs_message_context.initial_ttl

    def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]:
        """
        Create a new context from a message.
"""
Store build information.

"""
from dataclasses import dataclass
from typing import Optional

from microcosm.api import defaults, typed


@dataclass
class BuildInfo:
    build_num: Optional[str]
    sha1: Optional[str]


@defaults(
    build_num=typed(str, default_value=None),
    sha1=typed(str, default_value=None),
)
def configure_build_info(graph):
    """
    Configure build info

    """
    return BuildInfo(
        build_num=graph.config.build_info.build_num,
        sha1=graph.config.build_info.sha1,
    )
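
A hypothetical usage sketch; the binding name "build_info" is inferred from the graph.config.build_info lookups above, and the @binding decorator is not shown in the snippet:

from microcosm.api import create_object_graph
from microcosm.loaders import load_from_dict

# Hypothetical wiring; assumes the factory above is bound as "build_info".
loader = load_from_dict(build_info=dict(build_num="123", sha1="abc1234"))
graph = create_object_graph(name="example", loader=loader)
graph.use("build_info")
assert graph.build_info.build_num == "123"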
Example #28
from typing import List

from inflection import titleize
from microcosm.api import defaults, typed
from microcosm_logging.decorators import context_logger, logger
from microcosm_logging.timing import elapsed_time

from microcosm_pubsub.context import TTL_KEY
from microcosm_pubsub.errors import IgnoreMessage, TTLExpired, SkipMessage
from microcosm_pubsub.result import MessageHandlingResult


@logger
@defaults(
    # Number of failed attempts after which the message stops being processed
    message_max_processing_attempts=typed(int, default_value=None))
class SQSMessageDispatcher:
    """
    Dispatch batches of SQSMessages to handler functions.

    """
    def __init__(self, graph):
        self.opaque = graph.opaque
        self.sqs_consumer = graph.sqs_consumer
        self.sqs_message_context = graph.sqs_message_context
        self.sqs_message_handler_registry = graph.sqs_message_handler_registry
        self.send_metrics = graph.pubsub_send_metrics
        self.max_processing_attempts = graph.config.sqs_message_dispatcher.message_max_processing_attempts

    def handle_batch(self, bound_handlers) -> List[MessageHandlingResult]:
        """
Example #29
    def test_valid_default_factory(self):
        self.create_fixture(value=typed(list, default_factory=list))
        loader = load_from_dict()

        config = configure(self.registry.defaults, self.metadata, loader)
        assert_that(config, has_entries(foo=has_entries(value=empty())))
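
Presumably default_factory=list is used here (rather than default_value=[]) for the same reason dataclasses require factories for mutable defaults: a single shared list would be aliased across every configuration that falls back to it. A plain-Python illustration of that pitfall:

shared_default = []          # one list object reused as a "default"
config_a = shared_default
config_b = shared_default
config_a.append("oops")
assert config_b == ["oops"]  # both "configs" see the same mutation

fresh_a, fresh_b = list(), list()   # a factory hands out a fresh list each time
fresh_a.append("ok")
assert fresh_b == []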
Example #30
from jaeger_client.config import (
    DEFAULT_REPORTING_HOST,
    DEFAULT_REPORTING_PORT,
    DEFAULT_SAMPLING_PORT,
    Config,
)

from microcosm.api import binding, defaults, typed

SPAN_NAME = "span_name"


@binding("tracer")
@defaults(
    sample_type="ratelimiting",
    sample_param=typed(int, 10),
    sampling_port=typed(int, DEFAULT_SAMPLING_PORT),
    reporting_port=typed(int, DEFAULT_REPORTING_PORT),
    reporting_host=DEFAULT_REPORTING_HOST,
)
def configure_tracing(graph):
    """
    See https://www.jaegertracing.io/docs/1.12/sampling/ for more info about
    available sampling strategies.

    """
    config = Config(
        config={
            "sampler": {
                "type": graph.config.tracer.sample_type,
                "param": graph.config.tracer.sample_param,
"""
Metrics extensions for routes.

"""
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm.errors import NotBoundError


@defaults(
    enabled=typed(boolean, default_value=True),
)
class RouteMetrics:

    def __init__(self, graph):
        self.metrics = self.get_metrics(graph)
        self.enabled = bool(
            self.metrics
            and self.metrics.host != "localhost"
            and graph.config.route_metrics.enabled
        )
        self.graph = graph

    def get_metrics(self, graph):
        """
        Fetch the metrics client from the graph.

        Metrics will be disabled if not configured.

        """
        try: