from aiohttp.web_exceptions import HTTPUnauthorized
from guillotina import glogging
from guillotina._settings import app_settings
from guillotina.interfaces import IRequest

import fnmatch


logger = glogging.getLogger("guillotina")


class DefaultCorsRenderer:
    """Computes CORS response headers for a request from ``app_settings["cors"]``."""

    def __init__(self, request: IRequest) -> None:
        self.request = request

    async def get_settings(self):
        # CORS configuration comes from the global application settings.
        return app_settings["cors"]

    async def get_headers(self):
        """Build the Access-Control-* headers for the current request's Origin.

        Returns a dict of headers; empty when the request carries no
        ``Origin`` header.
        """
        settings = await self.get_settings()
        headers = {}
        origin = self.request.headers.get("Origin", None)
        if origin:
            if "*" in settings["allow_origin"]:
                # Wildcard configured: any origin is allowed.
                headers["Access-Control-Allow-Origin"] = "*"
            elif any([
                # allow_origin entries are matched as glob patterns
                # (e.g. "https://*.example.com").
                fnmatch.fnmatchcase(origin, o)
                for o in settings["allow_origin"]
            ]):
                headers["Access-Control-Allow-Origin"] = origin
            else:
                # Origin present but matches no allowed pattern.
                # NOTE(review): statement truncated in this excerpt — the
                # logger.error(...) call continues beyond this view.
                logger.error("Origin %s not allowed" % origin,
    # NOTE(review): this chunk starts inside a ``try:`` block whose opening
    # line is outside this excerpt; the metrics below are only created when
    # prometheus_client imports successfully (see the except branch).
    amqp_running_jobs = Gauge(
        # NOTE(review): "AQMP" in the description looks like a typo for
        # "AMQP" — metric text is runtime data, left unchanged here.
        "amqp_running_jobs", "Number of AQMP running jobs in worker", []
    )
    amqp_job_duration = Histogram(
        "amqp_job_duration",
        "AMQP job duration histogram",
        ["dotted_name", "final_status", "container_id"],
    )
except ImportError:
    # Do not record metrics if prometheus_client not installed
    amqp_running_jobs = None
    amqp_job_duration = None


logger = glogging.getLogger("guillotina_amqp.worker")

# Presumably milliseconds (1000 * 60 == one minute, per the inline
# comments) — TODO confirm units at the usage site.
default_delayed = 1000 * 60 * 2  # 2 minutes
default_errored = 1000 * 60 * 60 * 24 * 7 * 1  # 1 week


class Worker:
    """Workers hold an asyncio loop in which several tasks will run.

    The worker reads new job descriptions from RabbitMQ and runs them as
    asyncio tasks, posting job results to a state manager.
    """

    # Pause between worker-loop iterations — presumably seconds; usage is
    # outside this excerpt.
    sleep_interval = 0.1
from guillotina.utils import resolve_dotted_name
from guillotina.utils import resolve_path
from guillotina.writable import check_writable_request

import aiohttp
import aiotask_context
import asyncio
import json
import logging.config


# RSA support is optional; fall back to None when pycrypto is absent.
try:
    from Crypto.PublicKey import RSA
except ImportError:
    RSA = None


logger = glogging.getLogger('guillotina')


def update_app_settings(settings):
    """Merge *settings* into the global ``app_settings``.

    When both the existing and incoming values for a key are dicts, the
    existing dict is updated in place (shallow merge); otherwise the
    incoming value replaces the existing entry.
    """
    for key, value in settings.items():
        if (isinstance(app_settings.get(key), dict) and
                isinstance(value, dict)):
            app_settings[key].update(value)
        else:
            app_settings[key] = value


def load_application(module, root, settings):
    """Invoke the application module's ``includeme`` hook when defined.

    NOTE(review): this function may continue beyond this excerpt.
    """
    # includeme function
    if hasattr(module, 'includeme'):
        lazy_apply(module.includeme, root, settings)
from guillotina.response import HTTPConflict
from guillotina.traversal import TraversalRouter
from guillotina.utils import lazy_apply
from guillotina.utils import list_or_dict_items
from guillotina.utils import resolve_dotted_name
from guillotina.utils import resolve_path
from guillotina.utils import secure_passphrase
from jwcrypto import jwk

import aiotask_context
import asyncio
import json
import logging.config


logger = glogging.getLogger('guillotina')


def update_app_settings(settings):
    """Merge *settings* into the global ``app_settings``.

    Dict values are shallow-merged into existing dict entries; any other
    value replaces the existing entry.
    """
    for key, value in settings.items():
        if (isinstance(app_settings.get(key), dict) and
                isinstance(value, dict)):
            app_settings[key].update(value)
        else:
            app_settings[key] = value


class ApplicationConfigurator:
    # NOTE(review): class truncated in this excerpt — ``__init__`` continues
    # beyond this view; presumably it configures the given applications
    # against root/settings. Confirm against the full file.

    def __init__(self, applications, config, root, settings, configured=None):
        if configured is None:
from guillotina.event import notify
from guillotina.exceptions import ConflictError
from guillotina.exceptions import ConflictIdOnContainer
from guillotina.exceptions import TIDConflictError
from guillotina.profile import profilable
from zope.interface import implementer

import asyncio
import asyncpg
import asyncpg.connection
import concurrent
import orjson
import time


log = glogging.getLogger("guillotina.storage")


# we can not use FOR UPDATE or FOR SHARE unfortunately because
# it can cause deadlocks on the database--we need to resolve them ourselves
register_sql(
    "GET_OID",
    # In this f-string {MAX_UID_LENGTH} is interpolated now, while
    # {{table_name}} stays a literal "{table_name}" placeholder to be
    # filled in later by the SQL registry.
    f"""
    SELECT zoid, tid, state_size, resource, of, parent_id, id, type, state
    FROM {{table_name}}
    WHERE zoid = $1::varchar({MAX_UID_LENGTH})
    """,
)
# NOTE(review): the following statement is truncated in this excerpt —
# the query text continues beyond this view.
register_sql(
    "GET_CHILDREN_KEYS",
        # NOTE(review): chunk begins mid-call — the ``... = Gauge(`` opening
        # is outside this excerpt, inside a ``try:`` guarding the optional
        # prometheus_client import (see the except branch below).
        # "AQMP" in the description looks like a typo for "AMQP" — metric
        # text is runtime data, left unchanged here.
        'amqp_running_jobs', 'Number of AQMP running jobs in worker', [])
    amqp_job_duration = Histogram(
        'amqp_job_duration', 'AMQP job duration histogram',
        ['dotted_name', 'final_status', 'container_id'])
except ImportError:
    # Do not record metrics if prometheus_client not installed
    amqp_running_jobs = None
    amqp_job_duration = None


logger = glogging.getLogger('guillotina_amqp.worker')

# Presumably milliseconds (1000 * 60 == one minute, per the inline
# comments) — TODO confirm units at the usage site.
default_delayed = 1000 * 60 * 2  # 2 minutes
default_errored = 1000 * 60 * 60 * 24 * 7 * 1  # 1 week


class Worker:
    """Workers hold an asyncio loop in which several tasks will run.

    The worker reads new job descriptions from RabbitMQ and runs them as
    asyncio tasks, posting job results to a state manager.
    """

    # Pause between worker-loop iterations — presumably seconds; usage is
    # outside this excerpt.
    sleep_interval = 0.1
    # Timestamp of last activity, initialized at class-definition time.
    last_activity = time.time()
from guillotina import app_settings
from guillotina.utils import resolve_dotted_name
from guillotina import glogging

import aioamqp
import asyncio
import uuid
import os
import json
import aioamqp.exceptions


logger = glogging.getLogger('guillotina_amqp')

# Beacon time-to-live default — 30 seconds per the inline comment.
beacon_ttl_default = 30  # 30 seconds


async def remove_connection(name='default'):
    '''
    Purpose here is to close out a bad connection. Next time
    get_connection is called, a new connection will be established
    '''
    amqp_settings = app_settings['amqp']
    if 'connections' not in amqp_settings:
        amqp_settings['connections'] = {}
    connections = amqp_settings['connections']
    if name not in connections:
        # Nothing registered under this name; nothing to close.
        return
    # Remove from the registry first so a concurrent get_connection
    # cannot hand out the connection we are about to close.
    connection = connections.pop(name)
    # NOTE(review): function truncated here — the ``try:`` body continues
    # beyond this excerpt.
    try:
from guillotina_amqp.interfaces import IStateManagerUtility
from lru import LRU

import asyncio
import json
import time
import uuid
import copy


# Redis-backed state management is optional; aioredis may be absent.
try:
    import aioredis
    from guillotina.contrib import redis
except ImportError:
    aioredis = None


logger = glogging.getLogger('guillotina_amqp.state')

# Default lock time-to-live, in seconds (per the "_S" suffix).
DEFAULT_LOCK_TTL_S = 60 * 1  # 1 minute


class TaskStatus:
    # Namespace of task lifecycle status string constants.
    SCHEDULED = 'scheduled'
    CANCELED = 'canceled'
    RUNNING = 'running'
    FINISHED = 'finished'
    ERRORED = 'errored'


@configure.utility(provides=IStateManagerUtility, name='memory')
class MemoryStateManager:
    # NOTE(review): class truncated in this excerpt — its docstring opens
    # here and the body continues beyond this view.
    '''
from lru import LRU

import asyncio
import backoff
import copy
import json
import time
import uuid


# Redis-backed state management is optional; aioredis may be absent.
try:
    import aioredis
    from guillotina.contrib import redis
except ImportError:
    aioredis = None


logger = glogging.getLogger("guillotina_amqp.state")

# Default lock time-to-live, in seconds (per the "_S" suffix).
DEFAULT_LOCK_TTL_S = 60 * 1  # 1 minute


class TaskStatus:
    # Namespace of task lifecycle status string constants.
    SCHEDULED = "scheduled"
    CANCELED = "canceled"
    RUNNING = "running"
    FINISHED = "finished"
    ERRORED = "errored"


@configure.utility(provides=IStateManagerUtility, name="memory")
class MemoryStateManager:
    # NOTE(review): class truncated in this excerpt — its docstring opens
    # here and the body continues beyond this view.
    """
from guillotina import glogging
from guillotina import task_vars
from guillotina.commands.server import ServerCommand
from guillotina.tests.utils import get_mocked_request
from guillotina_amqp.worker import Worker

import asyncio
import os
import threading


# Metrics endpoint is optional; inactive when prometheus_client is missing.
try:
    import prometheus_client
except ImportError:
    prometheus_client = None


logger = glogging.getLogger("guillotina_amqp")


async def prometheus_view(request):
    """Render the latest prometheus metrics as a text response.

    Returns ``None`` when prometheus_client is not installed.
    """
    if prometheus_client is None:
        return None
    output = prometheus_client.exposition.generate_latest()
    # generate_latest() returns bytes; decode for the text response.
    # NOTE(review): ``web`` is presumably aiohttp.web, imported outside
    # this excerpt — confirm against the full file.
    return web.Response(text=output.decode("utf8"))


def loop_check(loop, timeout):
    """Log an error when *loop* saw no activity within *timeout* seconds.

    NOTE(review): function may continue beyond this excerpt — the log
    message suggests the error path also exits the worker; confirm.
    """
    # Elapsed time since last update
    diff = loop.time() - getattr(loop, "__ping_time__", 0)
    if diff > timeout:
        logger.error(f"Exiting worker because no activity in {diff} seconds")
from guillotina_amqp.interfaces import ITaskDefinition
from guillotina_amqp.state import get_state_manager
from guillotina_amqp.state import TaskState
from guillotina_amqp.state import update_task_scheduled
from guillotina_amqp.types import SerializedRequest
from typing import cast
from urllib.parse import urlparse

import aioamqp
import asyncio
import inspect
import json
import time
import uuid


logger = glogging.getLogger("guillotina_amqp.utils")


async def cancel_task(task_id):
    """Cancel the task identified by *task_id*.

    Returns whether the task could be cancelled.
    """
    return await TaskState(task_id).cancel()


def get_task_id_prefix():
    """Return the task-id prefix for the current db and container."""
    current_db = task_vars.db.get()
    current_container = task_vars.container.get()
    return "task:{}-{}-".format(current_db.id, current_container.id)
from guillotina.utils.misc import get_current_container
from guillotina_amqp import amqp
from guillotina_amqp.exceptions import ObjectNotFoundException
from guillotina_amqp.interfaces import ITaskDefinition
from guillotina_amqp.state import get_state_manager
from guillotina_amqp.state import TaskState
from guillotina_amqp.state import update_task_scheduled

import aioamqp
import asyncio
import inspect
import json
import time
import uuid


logger = glogging.getLogger('guillotina_amqp.utils')


async def cancel_task(task_id):
    '''Cancel the task identified by *task_id*.

    Returns whether the task could be cancelled.
    '''
    state = TaskState(task_id)
    return await state.cancel()


def get_task_id_prefix():
    '''Return the task-id prefix for the current db and container.'''
    current_db = task_vars.db.get()
    current_container = task_vars.container.get()
    return 'task:{}-{}-'.format(current_db.id, current_container.id)