Example #1
# Imports assumed for this snippet; the original listing does not show them.
import time

from celery import Task
from flask import g, has_app_context, has_request_context, request
from gds_metrics.metrics import Histogram


def make_task(app):
    SQS_APPLY_ASYNC_DURATION_SECONDS = Histogram(
        'sqs_apply_async_duration_seconds', 'Time taken to put task on queue',
        ['task_name'])

    class NotifyTask(Task):
        abstract = True
        start = None

        def on_success(self, retval, task_id, args, kwargs):
            elapsed_time = time.time() - self.start
            app.logger.info("{task_name} took {time}".format(
                task_name=self.name, time="{0:.4f}".format(elapsed_time)))

        def on_failure(self, exc, task_id, args, kwargs, einfo):
            # ensure task will log exceptions to correct handlers
            app.logger.exception('Celery task: {} failed'.format(self.name))
            super().on_failure(exc, task_id, args, kwargs, einfo)

        def __call__(self, *args, **kwargs):
            # ensure task has flask context to access config, logger, etc
            with app.app_context():
                self.start = time.time()
                # Remove 'request_id' from the kwargs (so the task doesn't get an unexpected kwarg), then add it to g
                # so that it gets logged
                g.request_id = kwargs.pop('request_id', None)
                return super().__call__(*args, **kwargs)

        def apply_async(self,
                        args=None,
                        kwargs=None,
                        task_id=None,
                        producer=None,
                        link=None,
                        link_error=None,
                        **options):
            kwargs = kwargs or {}
            if has_request_context() and hasattr(request, 'request_id'):
                kwargs['request_id'] = request.request_id
            elif has_app_context() and 'request_id' in g:
                kwargs['request_id'] = g.request_id

            with SQS_APPLY_ASYNC_DURATION_SECONDS.labels(self.name).time():
                return super().apply_async(args, kwargs, task_id, producer,
                                           link, link_error, **options)

    return NotifyTask
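
make_task returns a base task class rather than a concrete task, so it still needs to be attached to a Celery app. The sketch below shows one way that wiring could look; the flask_app and notify_celery names, the broker URL and the example task are illustrative assumptions, not part of the snippet above.

# Illustrative wiring only: flask_app, notify_celery, the broker URL and
# send_email are assumed names, not taken from the snippet above.
from celery import Celery
from flask import Flask

flask_app = Flask(__name__)

# task_cls makes NotifyTask the base class of every task registered on this app,
# so they all get the app context, request_id propagation and timing behaviour.
notify_celery = Celery(__name__, broker='redis://localhost:6379/0',
                       task_cls=make_task(flask_app))


@notify_celery.task(name='example-task')
def send_email(notification_id):
    flask_app.logger.info('processing notification %s', notification_id)
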
Example #2
# Imports assumed for this snippet; the original listing does not show them.
import time

from celery import current_task
from flask import current_app, has_request_context, request
from gds_metrics.metrics import Gauge, Histogram
from sqlalchemy import event

from app import db  # assumed: the project's Flask-SQLAlchemy instance


def setup_sqlalchemy_events(app):

    TOTAL_DB_CONNECTIONS = Gauge(
        'db_connection_total_connected',
        'How many db connections are currently held (potentially idle) by the server',
    )

    TOTAL_CHECKED_OUT_DB_CONNECTIONS = Gauge(
        'db_connection_total_checked_out',
        'How many db connections are currently checked out by web requests',
    )

    DB_CONNECTION_OPEN_DURATION_SECONDS = Histogram(
        'db_connection_open_duration_seconds',
        'How long db connections are held open for in seconds',
        ['method', 'host', 'path'])

    # need this or db.engine isn't accessible
    with app.app_context():

        @event.listens_for(db.engine, 'connect')
        def connect(dbapi_connection, connection_record):
            # connection first opened with db
            TOTAL_DB_CONNECTIONS.inc()

        @event.listens_for(db.engine, 'close')
        def close(dbapi_connection, connection_record):
            # connection closed (probably only happens with overflow connections)
            TOTAL_DB_CONNECTIONS.dec()

        @event.listens_for(db.engine, 'checkout')
        def checkout(dbapi_connection, connection_record, connection_proxy):
            try:
                # connection given to a web worker
                TOTAL_CHECKED_OUT_DB_CONNECTIONS.inc()

                # this will overwrite any previous checkout_at timestamp
                connection_record.info['checkout_at'] = time.monotonic()

                # checkin runs after the request is already torn down, therefore we add the request_data onto the
                # connection_record as otherwise it won't have that information when checkin actually runs.
                # Note: this is not a problem for checkouts as the checkout always happens within a web request or task

                # web requests
                if has_request_context():
                    connection_record.info['request_data'] = {
                        'method': request.method,
                        'host': request.host,
                        'url_rule': request.url_rule.rule if request.url_rule else 'No endpoint',
                    }
                # celery apps
                elif current_task:
                    connection_record.info['request_data'] = {
                        'method': 'celery',
                        'host': current_app.config['NOTIFY_APP_NAME'],  # worker name
                        'url_rule': current_task.name,  # task name
                    }
                # anything else (possibly migrations)
                else:
                    current_app.logger.warning(
                        'Checked out sqlalchemy connection from outside of request/task'
                    )
                    connection_record.info['request_data'] = {
                        'method': 'unknown',
                        'host': 'unknown',
                        'url_rule': 'unknown',
                    }
            except Exception:
                current_app.logger.exception(
                    "Exception caught for checkout event.")

        @event.listens_for(db.engine, 'checkin')
        def checkin(dbapi_connection, connection_record):
            try:
                # connection returned by a web worker
                TOTAL_CHECKED_OUT_DB_CONNECTIONS.dec()

                # duration that connection was held by a single web request
                duration = time.monotonic() - connection_record.info['checkout_at']

                DB_CONNECTION_OPEN_DURATION_SECONDS.labels(
                    connection_record.info['request_data']['method'],
                    connection_record.info['request_data']['host'],
                    connection_record.info['request_data']['url_rule'],
                ).observe(duration)
            except Exception:
                current_app.logger.exception(
                    "Exception caught for checkin event.")
Example #3
from flask import current_app  # assumed: used below but not shown in the original listing
from app.models import (INTERNATIONAL_SMS_TYPE, SMS_TYPE, EMAIL_TYPE,
                        LETTER_TYPE, KEY_TYPE_TEST, KEY_TYPE_TEAM,
                        ServicePermission, INTERNATIONAL_LETTERS)
from app.service.utils import service_allowed_to_send_to
from app.v2.errors import TooManyRequestsError, BadRequestError, RateLimitError, ValidationError
from app import redis_store
from app.notifications.process_notifications import create_content_for_notification
from app.utils import get_public_notify_type_text
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_letter_contact_dao import dao_get_letter_contact_by_id
from app.serialised_models import SerialisedTemplate

from gds_metrics.metrics import Histogram
# rate_limit_cache_key (used below) is assumed to be imported from the project's
# redis helper utilities; the original listing omits that import.

REDIS_EXCEEDED_RATE_LIMIT_DURATION_SECONDS = Histogram(
    'redis_exceeded_rate_limit_duration_seconds',
    'Time taken to check rate limit',
)


def check_service_over_api_rate_limit(service, api_key):
    if current_app.config['API_RATE_LIMIT_ENABLED'] and current_app.config['REDIS_ENABLED']:
        cache_key = rate_limit_cache_key(service.id, api_key.key_type)
        rate_limit = service.rate_limit
        interval = 60
        with REDIS_EXCEEDED_RATE_LIMIT_DURATION_SECONDS.time():
            if redis_store.exceeded_rate_limit(cache_key, rate_limit, interval):
                current_app.logger.info(
                    "service {} has been rate limited for throughput".format(service.id))