class ContextVarsRuntimeContext(RuntimeContext):
    """An implementation of the RuntimeContext interface which wraps ContextVar under
    the hood. This is the preferred implementation for usage with Python 3.5+
    """

    _CONTEXT_KEY = "current_context"

    def __init__(self) -> None:
        self._current_context = ContextVar(self._CONTEXT_KEY,
                                           default=Context())

    def attach(self, context: Context) -> object:
        """See `opentelemetry.context.RuntimeContext.attach`."""
        return self._current_context.set(context)

    def get_current(self) -> Context:
        """See `opentelemetry.context.RuntimeContext.get_current`."""
        return self._current_context.get()

    def detach(self, token: object) -> None:
        """See `opentelemetry.context.RuntimeContext.detach`."""
        self._current_context.reset(token)  # type: ignore
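A minimal usage sketch (not from the original source) of the attach/get_current/detach round-trip above, with a plain dict standing in for opentelemetry's Context:

from contextvars import ContextVar

_current: ContextVar[dict] = ContextVar("current_context", default={})

token = _current.set({"trace_id": 123})      # what attach() returns
assert _current.get() == {"trace_id": 123}   # what get_current() reads
_current.reset(token)                        # what detach() does with the token
assert _current.get() == {}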
Example 2
class ServiceContextClass:
    def __init__(self) -> None:
        self.request_id_var = ContextVar("request_id_var",
                                         default=t.cast(
                                             "t.Optional[int]", None))
        self.component_name_var: ContextVar[str] = ContextVar("component_name",
                                                              default="cli")

    @property
    def sampled(self) -> t.Optional[int]:
        span = trace.get_current_span()
        if span is None:
            return None
        return 1 if span.get_span_context().trace_flags.sampled else 0

    @property
    def trace_id(self) -> t.Optional[int]:
        span = trace.get_current_span()
        if span is None:
            return None
        return span.get_span_context().trace_id

    @property
    def span_id(self) -> t.Optional[int]:
        span = trace.get_current_span()
        if span is None:
            return None
        return span.get_span_context().span_id

    @property
    def request_id(self) -> t.Optional[int]:
        return self.request_id_var.get()

    @property
    def component_name(self) -> t.Optional[str]:
        return self.component_name_var.get()
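A short sketch of how the ContextVar-backed properties behave, assuming the class's own imports (`t` as typing, OpenTelemetry's `trace`) are in place:

ctx = ServiceContextClass()
token = ctx.request_id_var.set(42)
assert ctx.request_id == 42
assert ctx.component_name == "cli"   # the ContextVar default
ctx.request_id_var.reset(token)
assert ctx.request_id is None        # back to the default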
Example 3
def trace_stack_push(trace_stack_var: ContextVar, node: Any) -> None:
    """Push an element to the top of a trace stack."""
    if (trace_stack := trace_stack_var.get()) is None:
        trace_stack = []
        trace_stack_var.set(trace_stack)
    trace_stack.append(node)
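Usage sketch: the Optional default is what makes the `is None` branch reachable on the first push.

from contextvars import ContextVar
from typing import Any, List, Optional

trace_stack_var: ContextVar[Optional[List[Any]]] = ContextVar("trace_stack", default=None)
trace_stack_push(trace_stack_var, "node-a")
trace_stack_push(trace_stack_var, "node-b")
assert trace_stack_var.get() == ["node-a", "node-b"]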
Example 4
class Resolver(IResolver):
    def __init__(
        self,
        dependency_storage: DependencyStorage,
    ) -> None:
        self._dependency_storage = dependency_storage
        self._condition_collections = ConditionCollections(
            self,
            dependency_storage, [
                AnyCondition, CollectionCondition, UnionCondition,
                IteratorCondition, ForwardRefCondition, GenericCondition
            ],
            default_condition=DefaultCondition)
        self._context_dependencies = ContextVar(f"_context_storage_{id(self)}")

    def _add_scoped_dependency(self, wrapper: DependencyWrapper,
                               resolved_dependency: Any) -> None:
        if wrapper.type_arguments:
            name_in_store = (wrapper.type_, wrapper.type_arguments)
        else:
            name_in_store = wrapper.type_

        context_dependencies = self._context_dependencies.get(None)
        if context_dependencies is None:
            new_context_dependencies = {name_in_store: [resolved_dependency]}
            self._context_dependencies.set(new_context_dependencies)
        else:
            dependencies = context_dependencies.get(name_in_store, None)
            if dependencies is None:
                context_dependencies[name_in_store] = [resolved_dependency]
            else:
                dependencies.append(resolved_dependency)

    def get_implementation_attr(
            self, annotations: Tuple[Tuple[str, Any]]) -> Dict[str, Any]:
        """Get resolved signature attributes"""
        callable_object_arguments = {}
        for name, annotation in annotations:
            if name == "self":
                continue

            callable_object_arguments[name] = self._condition_collections.find(
                annotation)

        return callable_object_arguments

    def _get_implementation(self, dependency_wrapper: DependencyWrapper):
        if dependency_wrapper.annotations is None:
            if dependency_wrapper.scope == Scope.CONTEXT:
                self._add_scoped_dependency(dependency_wrapper.target,
                                            dependency_wrapper.target)
            return dependency_wrapper.target

        attr = self.get_implementation_attr(dependency_wrapper.annotations)
        resolved_dependency = dependency_wrapper.target(**attr)
        if dependency_wrapper.scope == Scope.SINGLETON:
            dependency_wrapper.cache = resolved_dependency
        elif dependency_wrapper.scope == Scope.CONTEXT:
            self._add_scoped_dependency(dependency_wrapper,
                                        resolved_dependency)
        return resolved_dependency

    def get_resolved_dependencies(self, typing: Any) -> Iterator[Any]:
        """Get attributes from container for typing"""
        context_dependencies = self._context_dependencies.get(None)
        if context_dependencies is not None:
            dependencies = context_dependencies.get(typing, None)
            if dependencies is not None:
                for dependency in dependencies:
                    yield dependency
                return

        wrappers = self._dependency_storage.get_dependencies_by_annotation(
            typing)
        for wrapper in wrappers:
            yield self._check_and_get_implementation(wrapper)

        for dependency_wrapper in self._dependency_storage.get_dependencies(
                ignore_annotation=typing):
            if not _check_annotation(typing, dependency_wrapper.type_):
                continue
            yield self._check_and_get_implementation(dependency_wrapper)

    def _check_and_get_implementation(
            self, dependency_wrapper: DependencyWrapper) -> Any:
        if dependency_wrapper.cache is not None and dependency_wrapper.scope == Scope.SINGLETON:
            return dependency_wrapper.cache
        else:
            return self._get_implementation(dependency_wrapper)
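Why a ContextVar backs Scope.CONTEXT: every asyncio task runs in its own copy of the context, so dependencies resolved in one task never leak into another. A framework-free sketch of that isolation (all names here are illustrative):

import asyncio
from contextvars import ContextVar

_context_dependencies: ContextVar[dict] = ContextVar("_context_storage_demo")

async def resolve(name: str) -> dict:
    deps = _context_dependencies.get(None)
    if deps is None:
        deps = {}
        _context_dependencies.set(deps)   # visible only in this task's context
    deps[name] = object()
    return deps

async def main() -> None:
    a, b = await asyncio.gather(resolve("db"), resolve("db"))
    assert a is not b   # each task got its own Scope.CONTEXT store

asyncio.run(main())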
Example 5
class Kanata(BaseDispatcher):
    "彼方."

    always = True  # compatibility with the refactored bcc.

    signature_list: List[Union[NormalMatch, PatternReceiver]]
    stop_exec_if_fail: bool = True

    parsed_items: ContextVar[Dict[str, MessageChain]]

    allow_quote: bool
    skip_one_at_in_quote: bool

    content_token: Optional[Token] = None

    def __init__(
        self,
        signature_list: List[Union[NormalMatch, PatternReceiver]],
        stop_exec_if_fail: bool = True,
        allow_quote: bool = True,
        skip_one_at_in_quote: bool = False,
    ) -> None:
        """该魔法方法用于实例化该参数解析器.

        Args:
            signature_list (List[Union[NormalMatch, PatternReceiver]]): 匹配标识链
            stop_exec_if_fail (bool, optional): 是否在无可用匹配时停止监听器执行. Defaults to True.
            allow_quote (bool, optional): 是否允许 Kanata 处理回复消息中的用户输入部分. Defaults to True.
            skip_one_at_in_quote (bool, optional): 是否允许 Kanata 在处理回复消息中的用户输入部分时自动删除可能\
                由 QQ 客户端添加的 At 和一个包含在单独 Plain 元素中的空格. Defaults to False.
        """
        self.signature_list = signature_list
        self.stop_exec_if_fail = stop_exec_if_fail
        self.parsed_items = ContextVar("kanata_parsed_items")
        self.allow_quote = allow_quote
        self.skip_one_at_in_quote = skip_one_at_in_quote

    @staticmethod
    def detect_index(
        signature_chain: Tuple[Union[NormalMatch, PatternReceiver]],
        message_chain: MessageChain,
    ) -> Optional[Dict[str, Tuple[MessageIndex, MessageIndex]]]:
        merged_chain = merge_signature_chain(signature_chain)
        message_chain = message_chain.asMerged()
        element_num = len(message_chain.__root__)
        end_index: MessageIndex = (
            element_num - 1,
            len(message_chain.__root__[-1].text) if element_num != 0
            and message_chain.__root__[-1].__class__ is Plain else None,
        )

        reached_message_index: MessageIndex = (0, None)
        # [0] => real_index
        # [1] => text_index(optional)

        start_index: MessageIndex = (0, None)

        match_result: Dict[Arguments, Tuple[
            MessageIndex, MessageIndex],  # start(include)  # stop(exclude)
                           ] = {}

        signature_iterable = InsertGenerator(enumerate(merged_chain))
        latest_index = None
        matching_recevier: Optional[Arguments] = None

        for signature_index, signature in signature_iterable:
            if isinstance(signature, (Arguments, PatternReceiver)):
                if matching_recevier:  # a receiver has already been selected...
                    if isinstance(signature, Arguments):
                        if latest_index == signature_index:
                            matching_recevier.content.extend(signature.content)
                            continue
                        else:
                            raise TypeError(
                                "an unexpected case: match conflict")
                    if isinstance(signature, PatternReceiver):
                        matching_recevier.content.append(signature)
                        continue
                else:
                    if isinstance(signature, PatternReceiver):
                        signature = Arguments([signature])
                matching_recevier = signature
                start_index = reached_message_index
            elif isinstance(signature, NormalMatch):
                if not matching_recevier:
                    # If no argument capture is pending, match the FullMatch from the current position (reached_message_index).
                    current_chain = message_chain.subchain(
                        slice(reached_message_index, None, None))
                    if not current_chain.__root__:  # index out of range
                        return
                    if not isinstance(current_chain.__root__[0], Plain):
                        # the first element after slicing is **not** Plain.
                        return
                    re_match_result = re.match(signature.operator(),
                                               current_chain.__root__[0].text)
                    if not re_match_result:
                        # no match.
                        return
                    # Advance the current progress.
                    plain_text_length = len(current_chain.__root__[0].text)
                    pattern_length = re_match_result.end(
                    ) - re_match_result.start()
                    if (pattern_length + 1) > plain_text_length:  # advancing could overrun
                        # do not advance text_index; advance element_index instead
                        reached_message_index = (reached_message_index[0] + 1,
                                                 None)
                    else:
                        # advance the text index to just past the matched region.
                        reached_message_index = (
                            reached_message_index[0],
                            origin_or_zero(reached_message_index[1]) +
                            re_match_result.start() + pattern_length,
                        )
                else:
                    # An argument must be captured (greedy mode decides whether to search from the back).
                    greed = matching_recevier.isGreed
                    for element_index, element in enumerate(
                            message_chain.subchain(
                                slice(reached_message_index, None,
                                      None)).__root__):
                        if isinstance(element, Plain):
                            current_text: str = element.text
                            # apply the greedy choice
                            text_find_result_list = list(
                                re.finditer(signature.operator(),
                                            current_text))
                            if not text_find_result_list:
                                continue
                            text_find_result = text_find_result_list[-int(greed)]
                            if not text_find_result:
                                continue
                            text_find_index = text_find_result.start()

                            # Found it! Advance the progress, record the stop position of the matched argument, and clean up.
                            stop_index = (
                                reached_message_index[0] + element_index +
                                int(element_index == 0),
                                origin_or_zero(reached_message_index[1]) +
                                text_find_index,
                            )
                            match_result[matching_recevier] = (
                                copy.copy(start_index),
                                stop_index,
                            )

                            start_index = (0, None)
                            matching_recevier = None

                            pattern_length = (text_find_result.end() -
                                              text_find_result.start())
                            if (current_text == text_find_result.string[slice(
                                    *text_find_result.span())]):
                                # advancing text_index here would overrun the element...
                                # so advance element_index instead of text_index
                                reached_message_index = (
                                    reached_message_index[0] + element_index +
                                    int(element_index != 0),
                                    None,
                                )
                            else:
                                reached_message_index = (
                                    reached_message_index[0] + element_index,
                                    origin_or_zero(reached_message_index[1]) +
                                    text_find_index + pattern_length,
                                )
                            break
                    else:
                        # searched everything, no match found.
                        return
            latest_index = signature_index
        else:
            if matching_recevier:  # reached the end, but there is still work to do.
                # Compute the end coordinates.
                text_index = None

                latest_element = message_chain.__root__[-1]
                if isinstance(latest_element, Plain):
                    text_index = len(latest_element.text)

                stop_index = (len(message_chain.__root__), text_index)
                match_result[matching_recevier] = (start_index, stop_index)
            else:  # If no further capture is pending but the signatures can no longer describe the message layout, this match is judged invalid.
                if reached_message_index < end_index:
                    return

        return match_result

    @staticmethod
    def detect_and_mapping(
        signature_chain: Tuple[Union[NormalMatch, PatternReceiver]],
        message_chain: MessageChain,
    ) -> Optional[Dict[Arguments, MessageChain]]:
        match_result = Kanata.detect_index(signature_chain, message_chain)
        if match_result is not None:
            return {
                k: message_chain[v[0]:(
                    v[1][0],
                    (v[1][1] - (origin_or_zero(v[0][1]) if
                                (v[1][0] <= v[0][0] <= v[1][0]) else 0)
                     ) if v[1][1] is not None else None,
                )]
                for k, v in match_result.items()
            }

    @staticmethod
    def allocation(
        mapping: Dict[Arguments, MessageChain]
    ) -> Optional[Dict[str, MessageChain]]:
        if mapping is None:
            return None
        result = {}
        for arguemnt_set, message_chain in mapping.items():
            length = len(arguemnt_set.content)
            for index, receiver in enumerate(arguemnt_set.content):
                if receiver.name in result:
                    raise ConflictItem(
                        "{0} is defined repeatedly".format(receiver))
                if isinstance(receiver, RequireParam):
                    if not message_chain.__root__:
                        return
                    result[receiver.name] = message_chain
                elif isinstance(receiver, OptionalParam):
                    if not message_chain.__root__:
                        result[receiver.name] = None
                    else:
                        result[receiver.name] = message_chain
                break  # length matching is not implemented yet...
        return result

    @lru_cache(None)
    async def catch_argument_names(self) -> List[str]:
        return [
            i.name for i in self.signature_list
            if isinstance(i, PatternReceiver)
        ]

    async def beforeDispatch(self, interface: DispatcherInterface):
        message_chain: MessageChain = (await interface.lookup_param(
            "__kanata_messagechain__", MessageChain, None)).exclude(Source)
        if set([i.__class__ for i in message_chain.__root__
                ]).intersection(BLOCKING_ELEMENTS):
            raise ExecutionStop()
        if self.allow_quote and message_chain.has(Quote):
            # Automatically skip the first At after a Quote.
            # 0: Quote, 1: At, 2: Plain (a single space; a future mirai version may handle it, so we handle it here for now.)
            message_chain = message_chain[(3, None):]
            if self.skip_one_at_in_quote and message_chain.__root__:
                if message_chain.__root__[0].__class__ is At:
                    message_chain = message_chain[(
                        1, 1):]  # MessageIndex makes this kind of feature very quick to implement.
        mapping_result = self.detect_and_mapping(self.signature_list,
                                                 message_chain)
        if mapping_result is not None:
            self.content_token = self.parsed_items.set(
                self.allocation(mapping_result))
        else:
            if self.stop_exec_if_fail:
                raise ExecutionStop()

    async def catch(self, interface: DispatcherInterface):
        if not self.content_token:
            return
        random_id = random.random()
        current_item = self.parsed_items.get()
        if current_item is not None:
            result = current_item.get(interface.name, random_id)
            return Force(result) if result is not random_id else None
        else:
            if self.stop_exec_if_fail:
                raise ExecutionStop()

    async def afterDispatch(
        self,
        interface: "DispatcherInterface",
        exception: Optional[Exception] = None,
        tb: Optional[TracebackType] = None,
    ):
        if self.content_token:
            self.parsed_items.reset(self.content_token)
            self.catch = Kanata.catch
            self.content_token = None
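The before/after token discipline above, reduced to its core: set() in the "before" hook returns a Token, and reset() in the "after" hook restores whatever value was visible before, so nested dispatches cannot observe each other's parsed items. A sketch with hypothetical names, without the BCC framework:

from contextvars import ContextVar, Token
from typing import Dict, Optional

parsed_items: ContextVar[Dict[str, str]] = ContextVar("kanata_parsed_items")
content_token: Optional[Token] = None

def before_dispatch(result: Dict[str, str]) -> None:
    global content_token
    content_token = parsed_items.set(result)   # keep the token for the teardown

def after_dispatch() -> None:
    global content_token
    if content_token:
        parsed_items.reset(content_token)      # restore the previous value
        content_token = None

before_dispatch({"target": "hello"})
assert parsed_items.get()["target"] == "hello"
after_dispatch()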
Example 6
class GinoEngine:
    """
    Connects a :class:`~.dialects.base.Pool` and
    :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a source
    of database connectivity and behavior.

    A :class:`.GinoEngine` object is instantiated publicly using the
    :func:`gino.create_engine` function or
    :func:`db.set_bind() <gino.api.Gino.set_bind>` method.

    .. seealso::

        :doc:`/explanation/engine`

    """

    connection_cls = GinoConnection
    """Customizes the connection class to use, default is
    :class:`.GinoConnection`."""

    def __init__(self,
                 dialect,
                 pool,
                 loop,
                 logging_name=None,
                 echo=None,
                 execution_options=None):
        self._sa_engine = _SAEngine(
            dialect,
            logging_name=logging_name,
            echo=echo,
            execution_options=execution_options,
        )
        self._dialect = dialect
        self._pool = pool
        self._loop = loop
        self._ctx = ContextVar("gino", default=None)

    @property
    def dialect(self):
        """
        Read-only property for the
        :class:`~sqlalchemy.engine.interfaces.Dialect` of this engine.

        """
        return self._dialect

    @property
    def raw_pool(self):
        """
        Read-only access to the underlying database connection pool instance.
        This depends on the actual dialect in use, :class:`~asyncpg.pool.Pool`
        of asyncpg for example.

        """
        return self._pool.raw_pool

    def acquire(self, *, timeout=None, reuse=False, lazy=False, reusable=True):
        """
        Acquire a connection from the pool.

        There are two ways using this method - as an asynchronous context
        manager::

            async with engine.acquire() as conn:
                # play with the connection

        which will guarantee the connection is returned to the pool when
        leaving the ``async with`` block; or as a coroutine::

            conn = await engine.acquire()
            try:
                # play with the connection
            finally:
                await conn.release()

        where the connection should be manually returned to the pool with
        :meth:`conn.release() <.GinoConnection.release>`.

        Within the same context (usually the same :class:`~asyncio.Task`, see
        also :doc:`/how-to/transaction`), a nesting acquire with ``reuse=True``
        reuses the latest reusable connection instead of borrowing a new one
        (see the ``reuse`` parameter below).

        :param timeout: Block up to ``timeout`` seconds until there is one free
          connection in the pool. Default is ``None`` - block forever until
          succeeded. This has no effect when ``lazy=True``, and depends on the
          actual situation when ``reuse=True``.

        :param reuse: Reuse the latest reusable acquired connection (before
          it's returned to the pool) in current context if there is one, or
          borrow a new one if none present. Default is ``False``: always
          borrow a new one. This is useful when you are in a nested method call
          series, wishing to use the same connection without passing it around
          as parameters. See also: :doc:`/how-to/transaction`. A reusing
          connection is not reusable even if ``reusable=True``. If the reused
          connection happened to be a lazy one, then the reusing connection is
          lazy too.

        :param lazy: Don't acquire the actual underlying connection yet - do it
          only when needed. Default is ``False``: always do it immediately.
          This is useful before entering a code block which may or may not make
          use of a given connection object. Feeding in a lazy connection will
          save the borrow-return job if the connection is never used. If
          setting ``reuse=True`` at the same time, then the reused connection -
          if any - applies the same laziness. For example, reusing a lazy
          connection with ``lazy=False`` will cause the reused connection to
          acquire an underlying connection immediately.

        :param reusable: Mark this connection as reusable or otherwise. This
          has no effect if it is a reusing connection. All reusable connections
          are placed in a stack, any reusing acquire operation will always
          reuse the top (latest) reusable connection. One reusable connection
          may be reused by several reusing connections - they all share one
          same underlying connection. Acquiring a connection with
          ``reusable=False`` and ``reuse=False`` makes it a cleanly isolated
          connection which is only referenced once here.

        :return: A :class:`.GinoConnection` object.

        """
        return _AcquireContext(
            functools.partial(self._acquire, timeout, reuse, lazy, reusable))

    async def _acquire(self, timeout, reuse, lazy, reusable):
        stack = _ContextualStack(self._ctx)
        if reuse and stack:
            dbapi_conn = _ReusingDBAPIConnection(self._dialect.cursor_cls,
                                                 stack.top)
            reusable = False
        else:
            dbapi_conn = _DBAPIConnection(self._dialect.cursor_cls, self._pool)
        rv = self.connection_cls(
            self._dialect,
            _SAConnection(self._sa_engine, dbapi_conn),
            stack if reusable else None,
        )
        dbapi_conn.gino_conn = rv
        if not lazy:
            await dbapi_conn.acquire(timeout=timeout)
        if reusable:
            stack.push(dbapi_conn)
        return rv

    @property
    def current_connection(self):
        """
        Gets the most recently acquired reusable connection in the context.
        ``None`` if there is no such connection.

        :return: :class:`.GinoConnection`

        """
        stack = self._ctx.get()
        if stack:
            return stack[-1].gino_conn

    async def close(self):
        """
        Close the engine, by closing the underlying pool.

        """
        await self._pool.close()

    async def all(self, clause, *multiparams, **params):
        """
        Acquires a connection with ``reuse=True`` and runs
        :meth:`~.GinoConnection.all` on it. ``reuse=True`` means you can safely
        do this without borrowing more than one underlying connection::

            async with engine.acquire():
                await engine.all('SELECT ...')

        The same applies for other query methods.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.all(clause, *multiparams, **params)

    async def first(self, clause, *multiparams, **params):
        """
        Runs :meth:`~.GinoConnection.first`, See :meth:`.all`.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.first(clause, *multiparams, **params)

    async def one_or_none(self, clause, *multiparams, **params):
        """
        Runs :meth:`~.GinoConnection.one_or_none`, See :meth:`.all`.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.one_or_none(clause, *multiparams, **params)

    async def one(self, clause, *multiparams, **params):
        """
        Runs :meth:`~.GinoConnection.one`, See :meth:`.all`.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.one(clause, *multiparams, **params)

    async def scalar(self, clause, *multiparams, **params):
        """
        Runs :meth:`~.GinoConnection.scalar`, See :meth:`.all`.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.scalar(clause, *multiparams, **params)

    async def status(self, clause, *multiparams, **params):
        """
        Runs :meth:`~.GinoConnection.status`. See also :meth:`.all`.

        """
        async with self.acquire(reuse=True) as conn:
            return await conn.status(clause, *multiparams, **params)

    def compile(self, clause, *multiparams, **params):
        """
        A shortcut for :meth:`~gino.dialects.base.AsyncDialectMixin.compile` on
        the dialect, returns raw SQL string and parameters according to the
        rules of the dialect.

        """
        return self._dialect.compile(clause, *multiparams, **params)

    def transaction(self,
                    *args,
                    timeout=None,
                    reuse=True,
                    reusable=True,
                    **kwargs):
        """
        Borrows a new connection and starts a transaction with it.

        Unlike :meth:`.GinoConnection.transaction`, a transaction at the engine
        level supports only managed usage::

            async with engine.transaction() as tx:
                # play with transaction here

        Where the implicitly acquired connection is available as
        :attr:`tx.connection <gino.transaction.GinoTransaction.connection>`.

        By default, :meth:`.transaction` acquires connection with
        ``reuse=True`` and ``reusable=True``, that means it by default tries to
        create a nested transaction instead of a new transaction on a new
        connection. You can change the default behavior by setting these two
        arguments.

        The other arguments are the same as
        :meth:`~.GinoConnection.transaction` on connection.

        .. seealso::

            :meth:`.GinoEngine.acquire`

            :meth:`.GinoConnection.transaction`

            :class:`~gino.transaction.GinoTransaction`

        :return: An asynchronous context manager that yields a
          :class:`~gino.transaction.GinoTransaction`

        """
        return _TransactionContext(
            self.acquire(timeout=timeout, reuse=reuse, reusable=reusable),
            (args, kwargs),
        )

    def iterate(self, clause, *multiparams, **params):
        """
        Creates a server-side cursor in database for large query results.

        This requires that there is a reusable connection in the current
        context, and an active transaction is present. Then its
        :meth:`.GinoConnection.iterate` is executed and returned.

        """
        connection = self.current_connection
        if connection is None:
            raise ValueError("No Connection in context, please provide one")
        return connection.iterate(clause, *multiparams, **params)

    def update_execution_options(self, **opt):
        """Update the default execution_options dictionary
        of this :class:`.GinoEngine`.

        .. seealso::

            :meth:`sqlalchemy.engine.Engine.update_execution_options`

            :meth:`.GinoConnection.execution_options`

        """
        self._sa_engine.update_execution_options(**opt)

    async def _run_visitor(self, *args, **kwargs):
        async with self.acquire(reuse=True) as conn:
            await getattr(conn, "_run_visitor")(*args, **kwargs)

    def repr(self, color=False):
        return self._pool.repr(color)

    def __repr__(self):
        return self.repr()
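The reuse machinery above boils down to a per-context stack kept in a ContextVar: reusable connections are pushed, and a reusing acquire reads the top. A hypothetical stand-in for _ContextualStack's observable behavior (not gino's actual class):

from contextvars import ContextVar

_ctx: ContextVar = ContextVar("gino", default=None)

def push(conn) -> None:
    stack = _ctx.get()
    if stack is None:
        stack = []
        _ctx.set(stack)
    stack.append(conn)

def current_connection():
    stack = _ctx.get()
    return stack[-1] if stack else None

push("conn-1")
push("conn-2")   # a nested acquire(reuse=True) would see this one
assert current_connection() == "conn-2"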
Example 7
def trace_stack_pop(trace_stack_var: ContextVar) -> None:
    """Remove the top element from a trace stack."""
    trace_stack = trace_stack_var.get()
    trace_stack.pop()
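Paired usage with trace_stack_push from Example 3 (assumed in scope): push on entry, pop on exit, so the stack stays balanced within one context.

from contextvars import ContextVar
from typing import Any, List, Optional

stack_var: ContextVar[Optional[List[Any]]] = ContextVar("stack", default=None)
trace_stack_push(stack_var, "frame")
trace_stack_pop(stack_var)
assert stack_var.get() == []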
Example 8
class Backendpy:
    """The Backendpy ASGI handler"""

    def __new__(cls, *args, **kwargs):
        """Process Backendpy class instance."""
        config = get_config(project_path=cls._get_project_path())
        cls._add_project_sys_path(config['environment']['project_path'])
        return MiddlewareProcessor(paths=parse_list(config['middlewares']['active'])) \
            .run_process_application(application=super().__new__(cls))

    def __init__(self):
        """Initialize Backendpy class instance."""
        self.config = get_config(project_path=self._get_project_path(), error_logs=True)
        self.context = dict()
        self._request_context_var = ContextVar('request')
        self._hook_runner = HookRunner()
        self._router = Router()
        self._middleware_processor = MiddlewareProcessor(
            paths=parse_list(self.config['middlewares']['active']))
        self.errors = base_errors
        self._project_apps = self._get_project_apps()
        for app_data in self._project_apps:
            if app_data['app'].routes:
                for i in app_data['app'].routes:
                    self._router.routes.merge(i)
            if app_data['app'].hooks:
                for i in app_data['app'].hooks:
                    self._hook_runner.hooks.merge(i)
            if app_data['app'].errors:
                for i in app_data['app'].errors:
                    self.errors.merge(i)
            if app_data['app'].template_dirs:
                Template.template_dirs[app_data['path']] = \
                    [Path(app_data['path']).joinpath(p) for p in app_data['app'].template_dirs]
        self._lifespan_startup = False

    async def __call__(self, scope, receive, send):
        """Receive the requests and return the responses."""
        if scope['type'] == 'http':
            if not self._lifespan_startup:
                try:
                    await self.execute_event('startup')
                except Exception as e:
                    LOGGER.exception(e)
                else:
                    self._lifespan_startup = True

            try:
                body: bytes = await self._get_request_body(receive)
            except Exception as e:
                LOGGER.exception(f'Request data receive error: {e}')
                response = Error(1000)
                await self._send_response(send, *await response(None))
                return

            try:
                request = Request(app=self, scope=scope, body=body)
            except Exception as e:
                LOGGER.exception(f'Request instance creation error: {e}')
                response = Error(1000)
                await self._send_response(send, *await response(None))
                return

            token = self._request_context_var.set(request)
            await self.execute_event('request_start')
            await self._send_response(send, *await self._get_response(request))
            await self.execute_event('request_end')
            self._request_context_var.reset(token)

        elif scope['type'] == 'websocket':
            # TODO
            raise NotImplementedError

        elif scope['type'] == 'lifespan':
            await self._handle_lifespan(receive, send)

    def get_current_request(self):
        """Return the current request object."""
        return self._request_context_var.get()

    def event(self, name: str) -> callable:
        """Register an event hook with python decorator.

        .. seealso:: :func:`~backendpy.hook.Hooks.event`
        """
        return self._hook_runner.hooks.event(name)

    async def execute_event(self, name: str, args: Optional[Mapping[str, Any]] = None) -> None:
        """Trigger all hooks related to the event.

        :param name: The name of an event
        :param args: A dictionary-like object containing arguments passed to the hook function.
        """
        return await self._hook_runner.trigger(name, args)

    async def _handle_lifespan(self, receive, send):
        while True:
            message = await receive()
            if message['type'] == 'lifespan.startup':
                try:
                    await self.execute_event('startup')
                except Exception as e:
                    LOGGER.exception(e)
                    await send({'type': 'lifespan.startup.failed',
                                'message': str(e)})
                else:
                    self._lifespan_startup = True
                    await send({'type': 'lifespan.startup.complete'})

            elif message['type'] == 'lifespan.shutdown':
                try:
                    await self.execute_event('shutdown')
                except Exception as e:
                    LOGGER.exception(e)
                    await send({'type': 'lifespan.shutdown.failed',
                                'message': str(e)})
                else:
                    await send({'type': 'lifespan.shutdown.complete'})
                    return

    async def _get_response(self, request):
        try:
            request = await self._middleware_processor.run_process_request(request=request)
        except ExceptionResponse as e:
            return await e(request)
        except Exception as e:
            LOGGER.exception(f'Middleware error: {e}')
            response = Error(1000)
            return await response(request)

        try:
            handler, data_handler_cls, request.url_vars = \
                self._router.match(request.path, request.method, request.scheme)
            if not handler:
                response = Error(1001)
                return await response(request)
        except Exception as e:
            LOGGER.exception(e)
            response = Error(1000)
            return await response(request)

        try:
            handler = await self._middleware_processor.run_process_handler(
                request=request,
                handler=handler)
        except ExceptionResponse as e:
            return await e(request)
        except Exception as e:
            LOGGER.exception(f'Middleware error: {e}')
            response = Error(1000)
            return await response(request)

        try:
            data_errors = None
            if data_handler_cls:
                request.cleaned_data, data_errors = \
                    await data_handler_cls(request=request).get_cleaned_data()
            if data_errors:
                response = Error(1002, data=data_errors)
                return await response(request)
        except Exception as e:
            LOGGER.exception(f'Data handler error: {e}')
            response = Error(1000)
            return await response(request)

        try:
            response = await handler(request=request)
        except ExceptionResponse as e:
            return await e(request)
        except Exception as e:
            LOGGER.exception(f'Handler error: {e}')
            response = Error(1000)
            return await response(request)

        try:
            response = await self._middleware_processor.run_process_response(
                request=request,
                response=response)
        except ExceptionResponse as e:
            return await e(request)
        except Exception as e:
            LOGGER.exception(f'Middleware error: {e}')
            response = Error(1000)
            return await response(request)

        return await response(request)

    @staticmethod
    async def _send_response(send, body, status, headers, stream=False):
        await send({
            'type': 'http.response.start',
            'status': status,
            'headers': headers})

        if stream:
            if hasattr(body, '__aiter__'):
                async for chunk in body:
                    await send({
                        'type': 'http.response.body',
                        'body': to_bytes(chunk),
                        'more_body': True})
            else:
                for chunk in body:
                    await send({
                        'type': 'http.response.body',
                        'body': to_bytes(chunk),
                        'more_body': True})
            await send({
                'type': 'http.response.body',
                'body': b''})
        else:
            await send({
                'type': 'http.response.body',
                'body': to_bytes(body)})

    @staticmethod
    async def _get_request_body(receive) -> bytes:
        # TODO: problem for a huge body?
        body = b''
        more_body = True
        while more_body:
            message = await receive()
            body += message.get('body', b'')
            more_body = message.get('more_body', False)
        return body

    @staticmethod
    async def _get_request_body_generator(receive):
        more_body = True
        while more_body:
            message = await receive()
            yield message.get('body', b'')
            more_body = message.get('more_body', False)

    def _get_project_apps(self):
        apps: list[dict] = list()
        for package_name in parse_list(self.config['apps']['active']):
            try:
                module = importlib.import_module(f'{package_name}.main')
                app = getattr(module, 'app')
                if isinstance(app, App):
                    apps.append(dict(
                        package_name=package_name,
                        path=os.path.dirname(os.path.abspath(module.__file__)),
                        app=app))
                else:
                    LOGGER.error(f'"{package_name}" app instance error')
            except (ImportError, AttributeError):
                LOGGER.error(f'"{package_name}" app instance import error')
        return apps

    @staticmethod
    def _get_project_path():
        return os.path.dirname(os.path.realpath(inspect.stack()[2].filename))

    @staticmethod
    def _add_project_sys_path(project_path):
        sys.path.insert(0, os.path.dirname(project_path))
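The request-scoped pattern Backendpy relies on, reduced to its core: one ContextVar set at the start of each request and reset at the end, so get_current_request() works anywhere down the call stack without threading the request through every signature. A hypothetical minimal version, not the Backendpy API:

import asyncio
from contextvars import ContextVar

_request_var: ContextVar = ContextVar("request")

def get_current_request():
    return _request_var.get()

async def handle(request) -> None:
    token = _request_var.set(request)
    try:
        assert get_current_request() is request   # visible anywhere downstream
    finally:
        _request_var.reset(token)                 # never leaks into the next request

asyncio.run(handle({"path": "/"}))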
Example 9
def increase_counter(contextvar: ContextVar) -> Generator:
    token = contextvar.set(contextvar.get() + 1)
    try:
        yield
    finally:
        contextvar.reset(token)
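As written this is a plain generator; typical use wraps it with contextlib.contextmanager (the original project presumably applies such a decorator, which is an assumption here; the sketch also assumes the example's own imports, ContextVar and typing.Generator, are in place):

from contextlib import contextmanager
from contextvars import ContextVar

depth: ContextVar[int] = ContextVar("depth", default=0)

with contextmanager(increase_counter)(depth):
    assert depth.get() == 1   # incremented inside the block
assert depth.get() == 0       # reset(token) restored the previous value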
Example 10
            "deck": deck,
            "auth_token": request.app.get("auth_token"),
        },
    )


async def send_to_clients(data, serial=None):
    clients = CLIENTS.get()

    if client_id := data.get("client_id"):
        if (client := clients.get(client_id)) is None:
            return
        clients = {client_id: client}

    elif serial:
        watched_serials = WATCHED_SERIALS.get()
        clients = {client_id: clients[client_id] for client_id in watched_serials.get(serial, set())}

    for client in clients.values():
        try:
            await client["websocket"].send_json(data)
        except Exception:
            if logger.level <= logging.DEBUG:
                logger.error(
                    f'[WEB] Cannot send websocket message `{data}` to client "{client["id"]}"', exc_info=True
                )


async def handle_to_web_queue(queue, loop, server_task):
    watched_serials = WATCHED_SERIALS.get()
    while True:
Example 11
class TimeoutIdleChecker(BaseIdleChecker):
    """
    Checks the idleness of a session by the time elapsed since last use.
    "Use" here means processing any computation request, such as
    query/batch-mode code execution, or holding active service-port connections.
    """

    name: ClassVar[str] = "timeout"

    _config_iv = t.Dict({
        t.Key('threshold', default="10m"): tx.TimeDuration(),
    }).allow_extra('*')

    idle_timeout: timedelta
    _policy_cache: ContextVar[Dict[AccessKey, Optional[Mapping[str, Any]]]]

    async def __ainit__(self) -> None:
        await super().__ainit__()
        self._policy_cache = ContextVar('_policy_cache')
        self._evh_session_started = \
            self._event_dispatcher.consume("session_started", None, self._session_started_cb)
        self._evh_execution_started = \
            self._event_dispatcher.consume("execution_started", None, self._execution_started_cb)
        self._evh_execution_finished = \
            self._event_dispatcher.consume("execution_finished", None, self._execution_exited_cb)
        self._evh_execution_timeout = \
            self._event_dispatcher.consume("execution_timeout", None, self._execution_exited_cb)
        self._evh_execution_cancelled = \
            self._event_dispatcher.consume("execution_cancelled", None, self._execution_exited_cb)

    async def aclose(self) -> None:
        self._event_dispatcher.unconsume("session_started",
                                         self._evh_session_started)
        self._event_dispatcher.unconsume("execution_started",
                                         self._evh_execution_started)
        self._event_dispatcher.unconsume("execution_finished",
                                         self._evh_execution_finished)
        self._event_dispatcher.unconsume("execution_timeout",
                                         self._evh_execution_timeout)
        self._event_dispatcher.unconsume("execution_cancelled",
                                         self._evh_execution_cancelled)
        await super().aclose()

    async def populate_config(self, raw_config: Mapping[str, Any]) -> None:
        config = self._config_iv.check(raw_config)
        self.idle_timeout = config['threshold']
        log.info(
            'TimeoutIdleChecker: default idle_timeout = {0:,} seconds',
            self.idle_timeout.total_seconds(),
        )

    async def update_app_streaming_status(
        self,
        session_id: SessionId,
        status: AppStreamingStatus,
    ) -> None:
        if status == AppStreamingStatus.HAS_ACTIVE_CONNECTIONS:
            await self._disable_timeout(session_id)
        elif status == AppStreamingStatus.NO_ACTIVE_CONNECTIONS:
            await self._update_timeout(session_id)

    async def _disable_timeout(self, session_id: SessionId) -> None:
        log.debug(f"TimeoutIdleChecker._disable_timeout({session_id})")
        await self._redis.set(f"session.{session_id}.last_access",
                              "0",
                              exist=self._redis.SET_IF_EXIST)

    async def _update_timeout(self, session_id: SessionId) -> None:
        log.debug(f"TimeoutIdleChecker._update_timeout({session_id})")
        t = await self._redis.time()
        await self._redis.set(
            f"session.{session_id}.last_access",
            f"{t:.06f}",
            expire=max(86400,
                       self.idle_timeout.total_seconds() * 2),
        )

    async def _session_started_cb(
        self,
        context: Any,
        agent_id: AgentId,
        event_name: str,
        session_id: SessionId,
        creation_id: str,
    ) -> None:
        await self._update_timeout(session_id)

    async def _execution_started_cb(
        self,
        context: Any,
        agent_id: AgentId,
        event_name: str,
        session_id: SessionId,
    ) -> None:
        await self._disable_timeout(session_id)

    async def _execution_exited_cb(
        self,
        context: Any,
        agent_id: AgentId,
        event_name: str,
        session_id: SessionId,
    ) -> None:
        await self._update_timeout(session_id)

    async def _do_idle_check(self, context: Any, agent_id: AgentId,
                             event_name: str) -> None:
        cache_token = self._policy_cache.set(dict())
        try:
            return await super()._do_idle_check(context, agent_id, event_name)
        finally:
            self._policy_cache.reset(cache_token)

    async def check_session(self, session: RowProxy,
                            dbconn: SAConnection) -> bool:
        session_id = session['id']
        active_streams = await self._redis.zcount(
            f"session.{session_id}.active_app_connections")
        if active_streams is not None and active_streams > 0:
            return True
        t = await self._redis.time()
        raw_last_access = await self._redis.get(
            f"session.{session_id}.last_access")
        if raw_last_access is None or raw_last_access == "0":
            return True
        last_access = float(raw_last_access)
        # serves as the default fallback if keypair resource policy's idle_timeout is "undefined"
        idle_timeout = self.idle_timeout.total_seconds()
        policy_cache = self._policy_cache.get()
        policy = policy_cache.get(session['access_key'], None)
        if policy is None:
            query = (sa.select([keypair_resource_policies]).select_from(
                sa.join(
                    keypairs,
                    keypair_resource_policies,
                    (keypair_resource_policies.c.name
                     == keypairs.c.resource_policy),
                )).where(keypairs.c.access_key == session['access_key']))
            result = await dbconn.execute(query)
            policy = await result.first()
            assert policy is not None
            policy_cache[session['access_key']] = policy
        # setting idle_timeout:
        # - zero/inf means "infinite"
        # - negative means "undefined"
        if policy['idle_timeout'] >= 0:
            idle_timeout = float(policy['idle_timeout'])
        if ((idle_timeout <= 0)
                or (math.isinf(idle_timeout) and idle_timeout > 0)
                or (t - last_access <= idle_timeout)):
            return True
        return False
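The per-sweep cache in _do_idle_check, in isolation: a fresh dict is installed for the duration of one check pass and dropped afterwards, so cached policy rows never outlive a single sweep. Illustrative names only:

from contextvars import ContextVar
from typing import Dict

_policy_cache: ContextVar[Dict[str, str]] = ContextVar("_policy_cache")

def do_sweep() -> None:
    token = _policy_cache.set(dict())
    try:
        cache = _policy_cache.get()
        cache.setdefault("access-key", "policy-row")   # what check_session() fills in
    finally:
        _policy_cache.reset(token)   # the cache dies with the sweep

do_sweep()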
Example 12
from asyncio import gather, get_event_loop, open_connection
from contextvars import ContextVar

hosts = ContextVar('hosts')
hosts.set({})


async def scan(host, port):
    href = '{}:{}'.format(host, port)
    print('scan start {}'.format(href))
    try:
        reader, writer = await open_connection(host, port)
        writer.write('Test \r\n'.encode('utf-8'))
        await writer.drain()
        result = await reader.read(1000)
        r = hosts.get()
        r[href] = result
        hosts.set(r)
        writer.close()
        await writer.wait_closed()
    except Exception as e:
        print('scan failed: {}'.format(e))


host = 'baidu.com'
loop = get_event_loop()
chunk = 100
for j in range(10):
    tasks = [scan(host, i + j * chunk) for i in range(1, chunk + 1)]
    # gather() wraps the bare coroutines in tasks; asyncio.wait() stopped
    # accepting bare coroutines in Python 3.11.
    loop.run_until_complete(gather(*tasks))
loop.close()

r = hosts.get()
print(r.keys())
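A caveat worth noting about the scanner above: the per-task hosts.set(r) never propagates back to the module-level context, because each task runs in a copy of it; the script still works only because hosts.get() returns the same dict object, which is mutated in place. A minimal demonstration:

import asyncio
from contextvars import ContextVar

var: ContextVar[dict] = ContextVar("var")
var.set({})

async def task() -> None:
    d = var.get()
    d["k"] = 1          # visible outside: same dict object, mutated in place
    var.set({"k": 2})   # NOT visible outside: set() affects only this task's context

asyncio.run(task())
assert var.get() == {"k": 1}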
Example 13
class CoreInterface:
    """Represent the interface of the ImJoy core."""

    # pylint: disable=no-self-use, too-many-instance-attributes, too-many-public-methods

    def __init__(self, app, imjoy_api=None, app_controller=None):
        """Set up instance."""
        self.event_bus = EventBus()
        self.current_user = ContextVar("current_user")
        self.current_plugin = ContextVar("current_plugin")
        self.current_workspace = ContextVar("current_workspace")
        self._all_users: Dict[str, UserInfo] = {}  # uid:user_info
        self._all_workspaces: Dict[str,
                                   WorkspaceInfo] = {}  # wid:workspace_info
        self._app = app
        self.app_controller = app_controller
        self.disconnect_delay = 1
        imjoy_api = imjoy_api or {}
        self._codecs = {}
        self._disconnected_plugins = []
        self._imjoy_api = dotdict({
            "_rintf": True,
            "log": self.info,
            "info": self.info,
            "error": self.error,
            "warning": self.warning,
            "critical": self.critical,
            "registerService": self.register_service,
            "register_service": self.register_service,
            "listServices": self.list_services,
            "list_services": self.list_services,
            "getService": self.get_service,
            "get_service": self.get_service,
            "utils": {},
            "listPlugins": self.list_plugins,
            "list_plugins": self.list_plugins,
            "getPlugin": self.get_plugin,
            "get_plugin": self.get_plugin,
            "generateToken": self.generate_token,
            "generate_token": self.generate_token,
            "create_workspace": self.create_workspace,
            "createWorkspace": self.create_workspace,
            "get_workspace": self.get_workspace_interface,
            "getWorkspace": self.get_workspace_interface,
            "list_workspaces": self.list_workspaces,
            "listWorkspaces": self.list_workspaces,
            "disconnect": self.disconnect,
        })
        self._imjoy_api.update(imjoy_api)

        # Add public workspace
        self.register_workspace(
            WorkspaceInfo.parse_obj({
                "name": "public",
                "persistent": True,
                "owners": ["root"],
                "allow_list": [],
                "deny_list": [],
                "visibility": "public",
            }))

        # Create root user
        self.root_user = UserInfo(
            id="root",
            is_anonymous=False,
            email=None,
            parent=None,
            roles=[],
            scopes=[],
            expires_at=None,
        )
        # Create root workspace
        self.root_workspace = WorkspaceInfo(
            name="root",
            owners=["root"],
            visibility=VisibilityEnum.protected,
            persistent=True,
        )
        self.root_workspace.set_global_event_bus(self.event_bus)
        self.register_workspace(self.root_workspace)
        self.load_extensions()

    def get_all_users(self):
        """Get all the users."""
        return list(self._all_users.values())

    def get_user_info_from_token(self, token):
        """Get user info from token."""
        user_info = parse_user(token)
        # Note here we only use the newly created user info object
        # if the same user id does not exist
        if user_info.id in self._all_users:
            user_info = self._all_users[user_info.id]
        else:
            self._all_users[user_info.id] = user_info
        return user_info

    async def restore_plugin(self, plugin):
        """Restore the plugin."""
        if plugin in self._disconnected_plugins:
            logger.info("Plugin connection restored")
            self._disconnected_plugins.remove(plugin)
        else:
            logger.warning("Plugin connection is not in the disconnected list")

    async def remove_plugin_delayed(self, plugin):
        """Remove the plugin after a delayed period (if not cancelled)."""
        await asyncio.sleep(self.disconnect_delay)
        # It means the session has been reconnected
        if plugin not in self._disconnected_plugins:
            return
        await self._terminate_plugin(plugin)

    def remove_plugin_temp(self, sid):
        """Remove session temporarily."""
        plugin = DynamicPlugin.get_plugin_by_session_id(sid)
        if plugin is None:
            logger.warning(
                "Plugin (sid: %s) does not exist or has already been terminated.",
                sid)
            return
        if plugin not in self._disconnected_plugins:
            self._disconnected_plugins.append(plugin)
        loop = asyncio.get_running_loop()
        loop.create_task(self.remove_plugin_delayed(plugin))

    async def disconnect(self):
        """Disconnect from the workspace."""
        plugin = self.current_plugin.get()
        await self._terminate_plugin(plugin)

    async def _terminate_plugin(self, plugin):
        """Terminate the plugin."""
        await plugin.terminate()
        user_info = plugin.user_info
        # Remove the user completely if no plugins exists
        if len(user_info.get_plugins()) <= 0:
            del self._all_users[user_info.id]
            logger.info(
                "Removing user (%s) completely since the user "
                "has no other plugin connected.",
                user_info.id,
            )

        workspace = plugin.workspace
        # Remove the user completely if no plugins exists
        if len(workspace.get_plugins()) <= 0 and not workspace.persistent:
            del self._all_workspaces[workspace.name]
            logger.info(
                "Removing workspace (%s) completely "
                "since there is no other plugin connected.",
                workspace.name,
            )

    def check_permission(self, workspace, user_info):
        """Check user permission for a workspace."""
        # pylint: disable=too-many-return-statements
        if isinstance(workspace, str):
            workspace_name = workspace
            workspace = self.get_workspace(workspace_name)
            if not workspace:
                logger.error("Workspace %s not found", workspace_name)
                return False

        # Make exceptions for the root user, children of root, and the public workspace
        if (user_info.id == "root" or user_info.parent == "root"
                or workspace.name == "public"):
            return True

        if workspace.name == user_info.id:
            return True

        if user_info.parent:
            parent = self._all_users.get(user_info.parent)
            if not parent:
                return False
            if not self.check_permission(workspace, parent):
                return False
            # if the parent has access
            # and the workspace is in the scopes
            # then we allow the access
            if workspace.name in user_info.scopes:
                return True

        _id = user_info.email or user_info.id

        if _id in workspace.owners:
            return True

        if workspace.visibility == VisibilityEnum.public:
            if workspace.deny_list and user_info.email not in workspace.deny_list:
                return True
        elif workspace.visibility == VisibilityEnum.protected:
            if workspace.allow_list and user_info.email in workspace.allow_list:
                return True

        if "admin" in user_info.roles:
            logger.info("Allowing access to %s for admin user %s",
                        workspace.name, user_info.id)
            return True

        return False

    def get_all_workspace(self):
        """Return all workspaces."""
        return list(self._all_workspaces.values())

    def is_workspace_registered(self, ws):
        """Return true if workspace is registered."""
        if ws in self._all_workspaces.values():
            return True
        return False

    def get_workspace(self, name):
        """Return the workspace."""
        if name in self._all_workspaces:
            return self._all_workspaces[name]
        return None

    def register_workspace(self, ws):
        """Register the workspace."""
        ws.set_global_event_bus(self.event_bus)
        if ws.name in self._all_workspaces:
            raise Exception(
                f"Another workspace with the same name {ws.name} already exists."
            )
        self._all_workspaces[ws.name] = ws
        self.event_bus.emit("workspace_registered", ws)

    def unregister_workspace(self, name):
        """Unregister the workspace."""
        if name not in self._all_workspaces:
            raise Exception(f"Workspace has not been registered: {name}")
        ws = self._all_workspaces[name]
        del self._all_workspaces[name]
        self.event_bus.emit("workspace_unregistered", ws)

    def load_extensions(self):
        """Load imjoy engine extensions."""
        # Support imjoy engine extensions
        # See how it works:
        # https://packaging.python.org/guides/creating-and-discovering-plugins/
        for entry_point in pkg_resources.iter_entry_points(
                "imjoy_engine_extension"):
            self.current_user.set(self.root_user)
            self.current_workspace.set(self.root_workspace)
            try:
                setup_extension = entry_point.load()
                setup_extension(self)
            except Exception:
                logger.exception("Failed to setup extension: %s",
                                 entry_point.name)
                raise

    def register_router(self, router):
        """Register a router."""
        self._app.include_router(router)

    def register_interface(self, name, func):
        """Register a interface function."""
        assert callable(func)
        self._imjoy_api[name] = func

    def register_service(self, service: dict):
        """Register a service."""
        plugin = self.current_plugin.get()
        workspace = self.current_workspace.get()
        if "name" not in service or "type" not in service:
            raise Exception(
                "Service should at least contain `name` and `type`")

        # TODO: check whether the service already exists
        service.config = service.get("config", {})
        assert isinstance(service.config,
                          dict), "service.config must be a dictionary"
        service.config["workspace"] = workspace.name
        formatted_service = ServiceInfo.parse_obj(service)
        formatted_service.set_provider(plugin)
        service_dict = formatted_service.dict()
        if formatted_service.config.require_context:
            for key in service_dict:
                if callable(service_dict[key]):

                    def wrap_func(func, *args, **kwargs):
                        user_info = self.current_user.get()
                        workspace = self.current_workspace.get()
                        kwargs["context"] = {
                            "user_id": user_info.id,
                            "email": user_info.email,
                            "is_anonymous": user_info.is_anonymous,
                            "workspace": workspace.name,
                        }
                        return func(*args, **kwargs)

                    setattr(formatted_service, key,
                            partial(wrap_func, service_dict[key]))
        # service["_rintf"] = True
        # Note: a service can set its `visibility` to `public` or `protected`
        workspace.add_service(formatted_service)
        return formatted_service.get_id()
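
    # Usage sketch (hypothetical `manager` instance of this class; assumes the
    # service payload is a dotdict-style mapping, since a plain dict would fail
    # the attribute-style `service.config` assignment above):
    #
    #     def echo(msg, context=None):
    #         # `context` is injected by wrap_func when require_context is set
    #         return {"msg": msg, "user": context["user_id"]}
    #
    #     sid = manager.register_service(dotdict({
    #         "name": "echo-service",
    #         "type": "functions",
    #         "config": {"require_context": True},
    #         "echo": echo,
    #     }))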

    def unregister_service(self, service_id):
        """Unregister a service."""
        workspace = self.current_workspace.get()
        plugin = self.current_plugin.get()
        services = workspace.get_services_by_plugin(plugin)
        found = False
        for service in services:
            if service.get_id() == service_id:
                workspace.remove_service(service)
                found = True
        if not found:
            raise KeyError(
                f"The service {service_id} is not registered in the current workspace."
            )

    def list_plugins(self):
        """List all plugins in the workspace."""
        workspace = self.current_workspace.get()
        return list(workspace.get_plugins())

    async def get_plugin(self, name):
        """Return a plugin by its name."""
        workspace = self.current_workspace.get()
        plugin = workspace.get_plugin_by_name(name)
        if plugin:
            return await plugin.get_api()
        raise Exception(f"Plugin {name} not found")

    async def get_service(self, service_id):
        """Return a service."""
        if isinstance(service_id, str):
            query = {"id": service_id}
        else:
            query = service_id

        if "workspace" in query:
            workspace = self.get_workspace(query["workspace"])
            if not workspace:
                raise Exception(
                    f"Service not found: {service_id} (workspace unavailable)")
        else:
            workspace = self.current_workspace.get()

        if "id" in query:
            service = workspace.get_services().get(query["id"])
            if not service:
                raise Exception(f"Service not found: {query['id']}")
        elif "name" in query:
            service = workspace.get_service_by_name(query["name"])
            if not service:
                raise Exception(f"Service not found: {query['name']}")
        else:
            raise Exception(
                "Please specify the service id or name to get the service")

        user_info = self.current_user.get()
        if (not self.check_permission(workspace, user_info)
                and service.config.visibility != VisibilityEnum.public):
            raise Exception(f"Permission denied: {service_id}")

        return service.dict()
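
    # Usage sketch for both query forms (names are hypothetical):
    #
    #     svc = await manager.get_service("some-service-id")  # by id string
    #     svc = await manager.get_service({
    #         "name": "echo-service",
    #         "workspace": "workspace-a",
    #     })  # by query dict, resolved against the named workspace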

    def list_workspaces(self):
        """List the workspaces for the user."""
        user_info = self.current_user.get()
        ret = []
        for workspace in self._all_workspaces.values():
            if self.check_permission(workspace, user_info):
                ret.append({"name": workspace.name})
        return ret

    def list_services(self, query: Optional[dict] = None):
        """Return a list of services based on the query."""
        # if workspace is not set, it means the current workspace
        # if workspace == "*", search globally
        # otherwise, search the specified workspace
        user_info = self.current_user.get()
        if query is None:
            query = {"workspace": "*"}

        ws = query.get("workspace")
        if ws:
            del query["workspace"]
        if ws == "*":
            ret = []
            for workspace in self.get_all_workspace():
                can_access_ws = self.check_permission(workspace, user_info)
                for service in workspace.get_services().values():
                    # To access the service, it should be public or owned by the user
                    if (not can_access_ws and service.config.visibility !=
                            VisibilityEnum.public):
                        continue
                    match = True
                    for key in query:
                        if getattr(service, key) != query[key]:
                            match = False
                    if match:
                        ret.append(service.get_summary())
            return ret
        if ws is not None:
            workspace = self.get_workspace(ws)
        else:
            workspace = self.current_workspace.get()
        if workspace is None:
            raise Exception(f"Workspace not found: {ws}")
        ret = []
        workspace_services = workspace.get_services()
        for service in workspace_services.values():
            match = True
            for key in query:
                if getattr(service, key) != query[key]:
                    match = False
            if match:
                ret.append(service.get_summary())

        return ret
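
    # The three query modes, sketched with hypothetical names:
    #
    #     everything = manager.list_services()                          # global ("*")
    #     current = manager.list_services({"type": "functions"})        # current workspace
    #     scoped = manager.list_services({"workspace": "workspace-a"})  # one workspace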

    def info(self, msg):
        """Log a plugin message."""
        plugin = self.current_plugin.get()
        logger.info("%s: %s", plugin.name, msg)
        workspace_logger = plugin.workspace.get_logger()
        if workspace_logger:
            workspace_logger.info("%s: %s", plugin.name, msg)

    def warning(self, msg):
        """Log a plugin message (warning)."""
        plugin = self.current_plugin.get()
        workspace_logger = plugin.workspace.get_logger()
        if workspace_logger:
            workspace_logger.warning("%s: %s", plugin.name, msg)

    def error(self, msg):
        """Log a plugin error message (error)."""
        plugin = self.current_plugin.get()
        workspace_logger = plugin.workspace.get_logger()
        if workspace_logger:
            workspace_logger.error("%s: %s", plugin.name, msg)

    def critical(self, msg):
        """Log a plugin error message (critical)."""
        plugin = self.current_plugin.get()
        workspace_logger = plugin.workspace.get_logger()
        if workspace_logger:
            workspace_logger.critical("%s: %s", plugin.name, msg)

    def generate_token(self, config: Optional[dict] = None):
        """Generate a token for the current workspace."""
        workspace = self.current_workspace.get()
        user_info = self.current_user.get()
        config = config or {}
        if "scopes" in config and config["scopes"] != [workspace.name]:
            raise Exception(
                "Scopes must be empty or contains only the workspace name.")
        config["scopes"] = [workspace.name]
        token_config = TokenConfig.parse_obj(config)
        scopes = token_config.scopes
        for scope in scopes:
            if not self.check_permission(scope, user_info):
                raise PermissionError(
                    f"User has no permission to scope: {scope}")
        token = generate_presigned_token(user_info, token_config)
        return token
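
    # Usage sketch: the scopes are pinned to the caller's current workspace,
    # so a config dict is optional (other TokenConfig fields are not shown here):
    #
    #     token = manager.generate_token()  # scopes == [workspace.name]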

    def create_workspace(self, config: dict):
        """Create a new workspace."""
        user_info = self.current_user.get()
        config["persistent"] = config.get("persistent") or False
        if user_info.is_anonymous and config["persistent"]:
            raise Exception(
                "Only registered users can create persistent workspaces.")
        workspace = WorkspaceInfo.parse_obj(config)
        workspace.set_global_event_bus(self.event_bus)
        if self.get_workspace(workspace.name):
            raise Exception(f"Workspace {workspace.name} already exists.")
        # make sure we add the user's email to owners
        _id = user_info.email or user_info.id
        if _id not in workspace.owners:
            workspace.owners.append(_id)
        workspace.owners = [o.strip() for o in workspace.owners if o.strip()]
        user_info.scopes.append(workspace.name)
        self.register_workspace(workspace)
        return self.get_workspace_interface(workspace.name)

    def _update_workspace(self, name, config: dict):
        """Bind the context to the generated workspace."""
        if not name:
            raise Exception("Workspace name is not specified.")
        workspace = self.get_workspace(name)
        if not workspace:
            raise Exception(f"Workspace {name} not found")
        user_info = self.current_user.get()
        if not self.check_permission(workspace, user_info):
            raise PermissionError(f"Permission denied for workspace {name}")

        if "name" in config:
            raise Exception("Changing workspace name is not allowed.")

        # make sure all the keys are valid
        # TODO: verify the type
        for key in config:
            if key.startswith("_") or not hasattr(workspace, key):
                raise KeyError(f"Invalid key: {key}")

        for key in config:
            if not key.startswith("_") and hasattr(workspace, key):
                setattr(workspace, key, config[key])
        # make sure we add the user's email to owners
        _id = user_info.email or user_info.id
        if _id not in workspace.owners:
            workspace.owners.append(_id)
        workspace.owners = [o.strip() for o in workspace.owners if o.strip()]

    def get_workspace_interface(self, name: str):
        """Bind the context to the generated workspace."""
        workspace = self.get_workspace(name)
        if not workspace:
            raise Exception(f"Workspace {name} not found")
        user_info = self.current_user.get()
        if not self.check_permission(workspace, user_info):
            raise PermissionError(f"Permission denied for workspace {name}")

        interface = self.get_interface()
        bound_interface = {}
        for key in interface:
            if callable(interface[key]):

                async def wrap_func(func, *args, **kwargs):
                    try:
                        workspace_bk = self.current_workspace.get()
                    except LookupError:
                        workspace_bk = None
                    try:
                        self.current_workspace.set(workspace)
                        ret = func(*args, **kwargs)
                        if inspect.isawaitable(ret):
                            ret = await ret
                    finally:
                        # restore the caller's workspace even on error
                        self.current_workspace.set(workspace_bk)
                    return ret

                bound_interface[key] = partial(wrap_func, interface[key])
                bound_interface[key].__name__ = key  # required for imjoy-rpc
            else:
                bound_interface[key] = interface[key]
        bound_interface["config"] = json.loads(workspace.json())
        bound_interface["set"] = partial(self._update_workspace, name)
        bound_interface["_rintf"] = True
        event_bus = workspace.get_event_bus()
        bound_interface["on"] = event_bus.on
        bound_interface["off"] = event_bus.off
        bound_interface["once"] = event_bus.once
        bound_interface["emit"] = event_bus.emit
        # Remove disconnect, since the plugin can call disconnect()
        # from their own workspace
        del bound_interface["disconnect"]
        self.event_bus.emit("user_entered_workspace", (user_info, workspace))
        return bound_interface
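
    # The wrap_func above uses a back-up/set/restore dance on current_workspace.
    # A generic sketch of the same ContextVar pattern; using the token returned
    # by ContextVar.set() with reset() is usually preferable, because reset()
    # also restores the "unset" state that a backed-up value cannot express:
    #
    #     from contextvars import ContextVar
    #
    #     current = ContextVar("current")
    #
    #     def run_in(value, func, *args, **kwargs):
    #         token = current.set(value)
    #         try:
    #             return func(*args, **kwargs)
    #         finally:
    #             current.reset(token)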

    def get_workspace_as_root(self, name="root"):
        """Get a workspace api as root user."""
        self.current_user.set(self.root_user)
        return dotdict(self.get_workspace_interface(name))

    async def get_plugin_as_root(self, name, workspace):
        """Get a plugin api as root user."""
        self.current_user.set(self.root_user)
        workspace = self.get_workspace(workspace)
        if not workspace:
            raise Exception(f"Workspace {workspace} does not exist.")
        self.current_workspace.set(workspace)
        return dotdict(await self.get_plugin(name))

    def get_interface(self):
        """Return the interface."""
        return self._imjoy_api.copy()

    def register_codec(self, config):
        """Register a codec."""
        assert "name" in config
        assert "encoder" in config or "decoder" in config
        if "type" in config:
            for codec_type, codec in list(self._codecs.items()):
                if codec.type == config["type"] or codec_type == config["name"]:
                    logger.info("Removing duplicated codec: %s", codec_type)
                    del self._codecs[codec_type]

        self._codecs[config["name"]] = dotdict(config)

    def get_codecs(self):
        """Return registered codecs for rpc."""
        return self._codecs

    def mount_app(self, path, app, name=None, priority=-1):
        """Mount an app to fastapi."""
        route = Mount(path, app, name=name)
        # remove existing path
        routes_remove = [
            route for route in self._app.routes if route.path == path
        ]
        for rou in routes_remove:
            self._app.routes.remove(rou)
        # The default priority is -1 which assumes the last one is websocket
        self._app.routes.insert(priority, route)

    def umount_app(self, path):
        """Unmount an app to fastapi."""
        routes_remove = [
            route for route in self._app.routes if route.path == path
        ]
        for route in routes_remove:
            self._app.routes.remove(route)
Example #14
class BaseContainerRegistry(metaclass=ABCMeta):

    etcd: AsyncEtcd
    registry_name: str
    registry_info: Mapping[str, Any]
    registry_url: yarl.URL
    max_concurrency_per_registry: int
    base_hdrs: Dict[str, str]
    credentials: Dict[str, str]
    ssl_verify: bool

    sema: ContextVar[asyncio.Semaphore]
    reporter: ContextVar[Optional[ProgressReporter]]
    all_updates: ContextVar[Dict[str, str]]

    def __init__(
        self,
        etcd: AsyncEtcd,
        registry_name: str,
        registry_info: Mapping[str, Any],
        *,
        max_concurrency_per_registry: int = 4,
        ssl_verify: bool = True,
    ) -> None:
        self.etcd = etcd
        self.registry_name = registry_name
        self.registry_info = registry_info
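        # note: the registry URL is stored under the empty key of the registry config mapping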
        self.registry_url = registry_info['']
        self.max_concurrency_per_registry = max_concurrency_per_registry
        self.base_hdrs = {
            'Accept': 'application/vnd.docker.distribution.manifest.v2+json',
        }
        self.credentials = {}
        self.ssl_verify = ssl_verify
        self.sema = ContextVar('sema')
        self.reporter = ContextVar('reporter', default=None)
        self.all_updates = ContextVar('all_updates')

    async def rescan_single_registry(
        self,
        reporter: Optional[ProgressReporter] = None,
    ) -> None:
        self.all_updates.set({})
        self.sema.set(asyncio.Semaphore(self.max_concurrency_per_registry))
        self.reporter.set(reporter)
        username = self.registry_info['username']
        if username is not None:
            self.credentials['username'] = username
        password = self.registry_info['password']
        if password is not None:
            self.credentials['password'] = password
        non_kernel_words = (
            'common-',
            'commons-',
            'base-',
            'krunner',
            'builder',
            'backendai',
            'geofront',
        )
        ssl_ctx = None  # default
        if not self.registry_info['ssl-verify']:
            ssl_ctx = False
        connector = aiohttp.TCPConnector(ssl=ssl_ctx)
        async with aiohttp.ClientSession(connector=connector) as sess:
            async with aiotools.TaskGroup() as tg:
                async for image in self.fetch_repositories(sess):
                    if not any((w in image) for w in
                               non_kernel_words):  # skip non-kernel images
                        tg.create_task(self._scan_image(sess, image))
        all_updates = self.all_updates.get()
        if not all_updates:
            log.info('No images found in registry {0}', self.registry_url)
        else:
            for kvlist in chunked(sorted(all_updates.items()), 16):
                await self.etcd.put_dict(dict(kvlist))

    async def _scan_image(
        self,
        sess: aiohttp.ClientSession,
        image: str,
    ) -> None:
        rqst_args = await registry_login(
            sess,
            self.registry_url,
            self.credentials,
            f'repository:{image}:pull',
        )
        rqst_args['headers'].update(**self.base_hdrs)
        tags = []
        tag_list_url: Optional[yarl.URL]
        tag_list_url = (self.registry_url /
                        f'v2/{image}/tags/list').with_query({'n': '10'}, )
        while tag_list_url is not None:
            async with sess.get(tag_list_url, **rqst_args) as resp:
                data = json.loads(await resp.read())
                if 'tags' in data:
                    # sometimes there are dangling image names in the hub.
                    tags.extend(data['tags'])
                tag_list_url = None
                next_page_link = resp.links.get('next')
                if next_page_link:
                    next_page_url = cast(yarl.URL, next_page_link['url'])
                    tag_list_url = (self.registry_url.with_path(
                        next_page_url.path).with_query(next_page_url.query))
        if (reporter := self.reporter.get()) is not None:
            reporter.total_progress += len(tags)
        async with aiotools.TaskGroup() as tg:
            for tag in tags:
                tg.create_task(self._scan_tag(sess, rqst_args, image, tag))
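
Because sema, reporter and all_updates are ContextVars and each asyncio task
runs in a copy of the context it was created in, several registries can be
rescanned concurrently without their per-scan state clashing. A minimal sketch
under that assumption, reusing the aiotools import from the example above:

async def rescan_all(registries):
    # each task sees its own sema/reporter/all_updates values
    async with aiotools.TaskGroup() as tg:
        for registry in registries:
            tg.create_task(registry.rescan_single_registry())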
Example #15
class PurityClient:

    endpoint: URL
    api_token: str
    api_version: str
    auth_token: ContextVar[str]

    _session: aiohttp.ClientSession
    _auth_token_cvtoken: Token

    def __init__(
        self,
        endpoint: str,
        api_token: str,
        *,
        api_version: str = "1.8",
    ) -> None:
        self.endpoint = URL(endpoint)
        self.api_token = api_token
        self.api_version = api_version
        self.auth_token = ContextVar("auth_token")
        self._session = aiohttp.ClientSession()

    async def aclose(self) -> None:
        await self._session.close()

    async def __aenter__(self) -> PurityClient:
        async with self._session.post(
            self.endpoint / "api" / "login",
            headers={"api-token": self.api_token},
            ssl=False,
            raise_for_status=True,
        ) as resp:
            auth_token = resp.headers["x-auth-token"]
            self._auth_token_cvtoken = self.auth_token.set(auth_token)
            _ = await resp.json()
        return self

    async def __aexit__(self, *exc_info) -> None:
        self.auth_token.reset(self._auth_token_cvtoken)

    # For the concrete API reference, check out:
    # https://purity-fb.readthedocs.io/en/latest/

    async def get_metadata(self) -> Mapping[str, Any]:
        if self.auth_token.get(None) is None:
            raise RuntimeError("The auth token for Purity API is not initialized.")
        items = []
        pagination_token = ""
        while True:
            async with self._session.get(
                (self.endpoint / "api" / self.api_version / "arrays"),
                headers={"x-auth-token": self.auth_token.get()},
                params={
                    "items_returned": 10,
                    "token": pagination_token,
                },
                ssl=False,
                raise_for_status=True,
            ) as resp:
                data = await resp.json()
                for item in data["items"]:
                    items.append(item)
                pagination_token = data["pagination_info"]["continuation_token"]
                if pagination_token is None:
                    break
        if not items:
            return {}
        first = items[0]
        return {
            "id": first["id"],
            "name": first["name"],
            "os": first["os"],
            "revision": first["revision"],
            "version": first["version"],
            "blade_count": str(len(items)),
            "console_url": str(self.endpoint),
        }

    async def get_nfs_metric(
        self,
        fs_name: str,
    ) -> AsyncGenerator[Mapping[str, Any], None]:
        if self.auth_token.get(None) is None:
            raise RuntimeError("The auth token for Purity API is not initialized.")
        pagination_token = ""
        while True:
            async with self._session.get(
                (
                    self.endpoint
                    / "api"
                    / self.api_version
                    / "file-systems"
                    / "performance"
                ),
                headers={"x-auth-token": self.auth_token.get()},
                params={
                    "names": fs_name,
                    "protocol": "NFS",
                    "items_returned": 10,
                    "token": pagination_token,
                },
                ssl=False,
                raise_for_status=True,
            ) as resp:
                data = await resp.json()
                for item in data["items"]:
                    yield item
                pagination_token = data["pagination_info"]["continuation_token"]
                if pagination_token is None:
                    break

    async def get_usage(self, fs_name: str) -> Mapping[str, Any]:
        if self.auth_token.get(None) is None:
            raise RuntimeError("The auth token for Purity API is not initialized.")
        items = []
        pagination_token = ""
        while True:
            async with self._session.get(
                (self.endpoint / "api" / self.api_version / "file-systems"),
                headers={"x-auth-token": self.auth_token.get()},
                params={
                    "names": fs_name,
                    "items_returned": 10,
                    "token": pagination_token,
                },
                ssl=False,
                raise_for_status=True,
            ) as resp:
                data = await resp.json()
                for item in data["items"]:
                    items.append(item)
                pagination_token = data["pagination_info"]["continuation_token"]
                if pagination_token is None:
                    break
        if not items:
            return {}
        first = items[0]
        return {
            "capacity_bytes": data["total"]["provisioned"],
            "used_bytes": first["space"]["total_physical"],
        }
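
A usage sketch for the client above; the endpoint and token are placeholders:

import asyncio

async def main():
    client = PurityClient("https://purity.example.com", api_token="...")
    async with client:  # logs in and stores the session token in auth_token
        print(await client.get_metadata())
    await client.aclose()

asyncio.run(main())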
Example #16
class LocalStack(Generic[T]):
    """LocalStack.

    Manage state per coroutine (also thread safe).

    Most famously used probably in Flask to keep track of the current
    request object.
    """

    _stack: ContextVar[Optional[List[T]]]

    def __init__(self) -> None:
        self._stack = ContextVar('_stack')

    # XXX mypy bug; when fixed type Generator, should be ContextManager.
    @contextmanager
    def push(self, obj: T) -> Generator[None, None, None]:
        """Push a new item to the stack."""
        self.push_without_automatic_cleanup(obj)
        try:
            yield
        finally:
            self.pop()

    def push_without_automatic_cleanup(self, obj: T) -> None:
        stack = self._stack.get(None)
        if stack is None:
            stack = []
            self._stack.set(stack)
        stack.append(obj)

    def pop(self) -> Optional[T]:
        """Remove the topmost item from the stack.

        Note:
            Will return the old value or `None` if the stack was already empty.
        """
        stack = self._stack.get(None)
        if stack is None:
            return None
        else:
            size = len(stack)
            if not size:
                self._stack.set(None)
                return None
            elif size == 1:
                item = stack[-1]
                self._stack.set(None)
            else:
                item = stack.pop()
            return item

    def __len__(self) -> int:
        stack = self._stack.get(None)
        return len(stack) if stack else 0

    @property
    def stack(self) -> Sequence[T]:
        # read-only version, do not modify
        return self._stack.get(None) or []

    @property
    def top(self) -> Optional[T]:
        """Return the topmost item on the stack.

        Does not remove it from the stack.

        Note:
            If the stack is empty, :const:`None` is returned.
        """
        stack = self._stack.get(None)
        return stack[-1] if stack else None
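
A short usage sketch; the current_request variable is illustrative:

current_request: LocalStack[str] = LocalStack()

def handle():
    with current_request.push("req-1"):
        assert current_request.top == "req-1"
        assert len(current_request) == 1
    assert current_request.top is None  # cleaned up on exit

handle()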
Example #17
class PurityClient:

    endpoint: URL
    api_token: str
    api_version: str
    auth_token: ContextVar[str]

    _session: aiohttp.ClientSession
    _auth_token_cvtoken: Token

    def __init__(
        self,
        endpoint: str,
        api_token: str,
        *,
        api_version: str = '1.8',
    ) -> None:
        self.endpoint = URL(endpoint)
        self.api_token = api_token
        self.api_version = api_version
        self.auth_token = ContextVar('auth_token')
        self._session = aiohttp.ClientSession()

    async def aclose(self) -> None:
        await self._session.close()

    async def __aenter__(self) -> PurityClient:
        async with self._session.post(
                self.endpoint / 'api' / 'login',
                headers={'api-token': self.api_token},
                ssl=False,
                raise_for_status=True,
        ) as resp:
            auth_token = resp.headers['x-auth-token']
            self._auth_token_cvtoken = self.auth_token.set(auth_token)
            _ = await resp.json()
        return self

    async def __aexit__(self, *exc_info) -> None:
        self.auth_token.reset(self._auth_token_cvtoken)

    async def get_nfs_metric(
            self, fs_name: str) -> AsyncGenerator[Mapping[str, Any], None]:
        if self.auth_token.get(None) is None:
            raise RuntimeError(
                'The auth token for Purity API is not initialized.')
        pagination_token = ''
        while True:
            async with self._session.get(
                (self.endpoint / 'api' / self.api_version / 'file-systems' /
                 'performance'),
                    headers={'x-auth-token': self.auth_token.get()},
                    params={
                        'names': fs_name,
                        'protocol': 'NFS',
                        'items_returned': 10,
                        'token': pagination_token,
                    },
                    ssl=False,
                    raise_for_status=True,
            ) as resp:
                data = await resp.json()
                for item in data['items']:
                    yield item
                pagination_token = data['pagination_info'][
                    'continuation_token']
                if pagination_token is None:
                    break
Example #18
class SentryReporter:
    """SentryReporter designed for sending reports to the Sentry server from
    a Tribler Client.
    """
    def __init__(self):
        self.scrubber = None
        self.last_event = None
        self.ignored_exceptions = [KeyboardInterrupt, SystemExit]
        # more info about how SentryReporter choose a strategy see in
        # SentryReporter.get_actual_strategy()
        self.global_strategy = SentryStrategy.SEND_ALLOWED_WITH_CONFIRMATION
        self.thread_strategy = ContextVar('context_strategy', default=None)

        self._sentry_logger_name = 'SentryReporter'
        self._logger = logging.getLogger(self._sentry_logger_name)

    def init(self,
             sentry_url='',
             release_version='',
             scrubber=None,
             strategy=SentryStrategy.SEND_ALLOWED_WITH_CONFIRMATION):
        """Initialization.

        This method should be called in each process that uses SentryReporter.

        Args:
            sentry_url: URL for Sentry server. If it is empty then Sentry's
                sending mechanism will not be initialized.

            scrubber: a class that will be used for scrubbing sending events.
                Only a single method should be implemented in the class:
                ```
                    def scrub_event(self, event):
                        pass
                ```
            release_version: string that represents a release version.
                See Also: https://docs.sentry.io/platforms/python/configuration/releases/
            strategy: a Sentry strategy for sending events (see class Strategy
                for more information)
        Returns:
            Sentry Guard.
        """
        self._logger.debug(f"Init: {sentry_url}")
        self.scrubber = scrubber
        self.global_strategy = strategy

        rv = sentry_sdk.init(
            sentry_url,
            release=release_version,
            # https://docs.sentry.io/platforms/python/configuration/integrations/
            integrations=[
                LoggingIntegration(
                    level=logging.INFO,  # Capture info and above as breadcrumbs
                    event_level=None,  # Send no errors as events
                ),
                ThreadingIntegration(propagate_hub=True),
            ],
            before_send=self._before_send,
            ignore_errors=[KeyboardInterrupt],
        )

        ignore_logger(self._sentry_logger_name)

        return rv

    def ignore_logger(self, logger_name: str):
        self._logger.debug(f"Ignore logger: {logger_name}")
        ignore_logger(logger_name)

    def add_breadcrumb(self, message='', category='', level='info', **kwargs):
        """Adds a breadcrumb for current Sentry client.

        It is necessary to specify a message, a category and a level to make this
        breadcrumb visible in Sentry server.

        Args:
            **kwargs: named arguments that will be added to Sentry event as well
        """
        crumb = {'message': message, 'category': category, 'level': level}

        self._logger.debug(f"Add the breadcrumb: {crumb}")

        return sentry_sdk.add_breadcrumb(crumb, **kwargs)

    def send_event(self,
                   event: Optional[Dict] = None,
                   post_data: Optional[Dict] = None,
                   sys_info: Optional[Dict] = None,
                   additional_tags: Optional[Dict[str, str]] = None,
                   retrieve_error_message_from_stacktrace=False):
        """Send the event to the Sentry server

        This method
            1. Enable Sentry's sending mechanism.
            2. Extend sending event by the information from post_data.
            3. Send the event.
            4. Disables Sentry's sending mechanism.

        Scrubbing the information will be performed in the `_before_send` method.

        During the execution of this method, all unhandled exceptions that
        will be raised, will be sent to Sentry automatically.

        Args:
            event: event to send. It should be taken from SentryReporter at
                previous stages of executing.
            post_data: dictionary made by the feedbackdialog.py
            sys_info: dictionary made by the feedbackdialog.py
            additional_tags: tags that will be added to the event

        Returns:
            Event that was sent to Sentry server
        """
        self._logger.info(f"Send: {post_data}, {event}")

        if event is None:
            return event

        post_data = post_data or dict()
        sys_info = sys_info or dict()
        additional_tags = additional_tags or dict()

        if CONTEXTS not in event:
            event[CONTEXTS] = {}

        if TAGS not in event:
            event[TAGS] = {}

        event[CONTEXTS][REPORTER] = {}

        # tags
        tags = event[TAGS]
        tags['version'] = get_value(post_data, 'version')
        tags['machine'] = get_value(post_data, 'machine')
        tags['os'] = get_value(post_data, 'os')
        tags['platform'] = get_first_item(get_value(sys_info, 'platform'))
        tags[f'{PLATFORM_DETAILS}'] = get_first_item(
            get_value(sys_info, PLATFORM_DETAILS))
        tags.update(additional_tags)

        # context
        context = event[CONTEXTS]
        reporter = context[REPORTER]
        version = get_value(post_data, 'version')

        context['browser'] = {'version': version, 'name': 'Tribler'}

        stacktrace_parts = parse_stacktrace(get_value(post_data, 'stack'))
        reporter[STACKTRACE] = next(stacktrace_parts, [])
        stacktrace_extra = next(stacktrace_parts, [])
        reporter[f'{STACKTRACE}_extra'] = stacktrace_extra
        reporter[f'{STACKTRACE}_context'] = next(stacktrace_parts, [])

        reporter['comments'] = get_value(post_data, 'comments')

        reporter[OS_ENVIRON] = parse_os_environ(get_value(
            sys_info, OS_ENVIRON))
        delete_item(sys_info, OS_ENVIRON)

        reporter['events'] = extract_dict(sys_info, r'^(event|request)')
        reporter[SYSINFO] = {
            key: sys_info[key]
            for key in sys_info if key not in reporter['events']
        }

        # try to retrieve an error from the stacktrace
        if retrieve_error_message_from_stacktrace and stacktrace_extra:
            exception_value = stacktrace_extra[-1].split(':', maxsplit=1)
            exception_values = event.get(EXCEPTION, {}).get(VALUES, [])
            if len(exception_value) == 2:
                exception_values.append({
                    'type': exception_value[0],
                    'value': exception_value[1]
                })

        with this_sentry_strategy(self, SentryStrategy.SEND_ALLOWED):
            sentry_sdk.capture_event(event)

        return event

    def get_confirmation(self, exception):
        """Get confirmation on sending exception to the Team.

        There are two message boxes, that will be triggered:
        1. Message box with the error_text
        2. Message box with confirmation about sending this report to the Tribler
            team.

        Args:
            exception: exception to be sent.
        """
        # pylint: disable=import-outside-toplevel
        try:
            from PyQt5.QtWidgets import QApplication, QMessageBox
        except ImportError:
            self._logger.debug(
                "PyQt5 is not available. User confirmation is not possible.")
            return False

        self._logger.debug(f"Get confirmation: {exception}")

        _ = QApplication(sys.argv)
        messagebox = QMessageBox(icon=QMessageBox.Critical,
                                 text=f'{exception}.')
        messagebox.setWindowTitle("Error")
        messagebox.exec()

        messagebox = QMessageBox(
            icon=QMessageBox.Question,
            text='Do you want to send this crash report to the Tribler team? '
            'We anonymize all your data, who you are and what you downloaded.',
        )
        messagebox.setWindowTitle("Error")
        messagebox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)

        return messagebox.exec() == QMessageBox.Yes

    def capture_exception(self, exception):
        self._logger.info(f"Capture exception: {exception}")
        sentry_sdk.capture_exception(exception)

    def event_from_exception(self, exception) -> Dict:
        """This function format the exception by passing it through sentry
        Args:
            exception: an exception that will be passed to `sentry_sdk.capture_exception(exception)`

        Returns:
            the event that has been saved in `_before_send` method
        """
        self._logger.info(f"Event from exception: {exception}")

        if not exception:
            return {}

        with this_sentry_strategy(self, SentryStrategy.SEND_SUPPRESSED):
            sentry_sdk.capture_exception(exception)
            return self.last_event

    def set_user(self, user_id):
        """Set the user to identify the event on a Sentry server

        The algorithm is the following:
        1. Calculate hash from `user_id`.
        2. Generate fake user, based on the hash.

        No real `user_id` will be used in Sentry.

        Args:
            user_id: Real user id.

        Returns:
            Generated user (dictionary: {id, username}).
        """
        # calculate hash to keep real `user_id` in secret
        user_id_hash = md5(user_id).hexdigest()

        self._logger.debug(f"Set user: {user_id_hash}")

        Faker.seed(user_id_hash)
        user_name = Faker().name()
        user = {'id': user_id_hash, 'username': user_name}

        sentry_sdk.set_user(user)
        return user

    def get_actual_strategy(self):
        """This method is used to determine actual strategy.

        Strategy can be global: self.strategy
        and local: self._context_strategy.

        Returns: the local strategy if it is defined, the global strategy otherwise
        """
        strategy = self.thread_strategy.get()
        return strategy if strategy else self.global_strategy

    @staticmethod
    def get_test_sentry_url():
        return os.environ.get('TRIBLER_TEST_SENTRY_URL', None)

    @staticmethod
    def is_in_test_mode():
        return bool(SentryReporter.get_test_sentry_url())

    def _before_send(self, event: Optional[Dict],
                     hint: Optional[Dict]) -> Optional[Dict]:
        """The method that is called before each send. Both allowed and
        disallowed.

        The algorithm:
        1. If sending is allowed, then scrub the event and send.
        2. If sending is disallowed, then store the event in
            `self.last_event`

        Args:
            event: event that generated by Sentry
            hint: root exception (can be used in some cases)

        Returns:
            The event, prepared for sending, or `None`, if sending is suppressed.
        """
        if not event:
            return event

        # trying to get context-depending strategy first
        strategy = self.get_actual_strategy()

        self._logger.info(f"Before send strategy: {strategy}")

        exc_info = get_value(hint, 'exc_info')
        error_type = get_first_item(exc_info)

        if error_type in self.ignored_exceptions:
            self._logger.debug(f"Exception is in ignored: {hint}. Skipped.")
            return None

        if strategy == SentryStrategy.SEND_SUPPRESSED:
            self._logger.debug("Suppress sending. Storing the event.")
            self.last_event = event
            return None

        if strategy == SentryStrategy.SEND_ALLOWED_WITH_CONFIRMATION:
            self._logger.debug("Request confirmation.")
            if not self.get_confirmation(hint):
                return None

        # clean up the event
        self._logger.debug(
            f"Clean up the event with scrubber: {self.scrubber}")
        if self.scrubber:
            event = self.scrubber.scrub_event(event)

        return event
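
The this_sentry_strategy helper used in send_event and event_from_exception is
not part of this listing; a plausible minimal reconstruction, assuming it
temporarily overrides the reporter's thread_strategy ContextVar:

from contextlib import contextmanager

@contextmanager
def this_sentry_strategy(reporter, strategy):
    # hypothetical reconstruction: swap the thread-local strategy
    # and restore the previous value on exit
    token = reporter.thread_strategy.set(strategy)
    try:
        yield reporter
    finally:
        reporter.thread_strategy.reset(token)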
Example #19
def run(coro):
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(with_session(coro))


@contextmanager
def settings(**values):
    old_values = SETTINGS.get()
    try:
        token = SETTINGS.set(merge(old_values, values))
        yield
    finally:
        SETTINGS.reset(token)


settings.get = lambda param, default=None: SETTINGS.get().get(param, default)
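
# Usage sketch (assumes SETTINGS is a ContextVar defaulting to {} and that
# merge() performs a shallow dict merge):
#
#     with settings(timeout=30):
#         assert settings.get('timeout') == 30
#         with settings(timeout=5):   # nested overrides win inside the block
#             assert settings.get('timeout') == 5
#         assert settings.get('timeout') == 30  # restored on exit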


def settings_middleware(what, *, after=None, before=None):
    assert not (after and before), "Should use either after or before param"

    if callable(what):
        what = [what]

    middleware = settings.get('middleware', [])
    i = _get_index(middleware, after or before)
    if after:
        i += 1
    return settings(middleware=middleware[:i] + what + middleware[i:])
settings.middleware = settings_middleware
Example #20
File: proxy.py Project: ddqof/proxy
class ProxyServer:
    def __init__(self, port: int = 8080, block_images: bool = False, cfg=None):
        self.connection = ContextVar("connection")
        self.block_images = block_images
        self.port = port
        self._spent_data = {}
        if cfg is not None:
            if isinstance(cfg, dict):
                self._cfg = cfg
            else:
                raise ValueError(f"Config should be {dict.__name__} object")
            for rsc in chain(cfg["limited"], cfg["black-list"]):
                self._spent_data[rsc] = 0
        self.context_token = None

    async def run(self):
        """
        Launch async proxy-server at specified host and port.
        """
        srv = await asyncio.start_server(self._handle_connection, LOCALHOST,
                                         self.port)

        addr = srv.sockets[0].getsockname()
        LOGGER.info(START_SERVER_MSG.format(app_address=addr))

        async with srv:
            await srv.serve_forever()

    async def _handle_connection(self, client_reader: StreamReader,
                                 client_writer: StreamWriter) -> None:
        """
        Handle every client connection.
        Called whenever a new connection is established.
        """
        try:
            raw_request = await client_reader.read(CHUNK_SIZE)
            await client_writer.drain()
            if not raw_request:
                return
            pr = ProxyRequest(raw_request, self._cfg)
            LOGGER.info(f"{pr.method:<{len('CONNECT')}} " f"{pr.abs_url}")
            try:
                server_reader, server_writer = await asyncio.open_connection(
                    pr.hostname, pr.port)
            except OSError:
                LOGGER.info(
                    CONNECTION_REFUSED_MSG.format(method=pr.method,
                                                  url=pr.abs_url))
                return
            client_endpoint = Endpoint(client_reader, client_writer)
            server_endpoint = Endpoint(server_reader, server_writer)
            conn = Connection(client_endpoint, server_endpoint, pr,
                              self.block_images)
            self.context_token = self.connection.set(conn)
            if self.block_images and pr.is_image_request:
                await self.connection.get().reset()
                return
            if pr.scheme is HTTPScheme.HTTPS:
                await self._handle_https()
            else:
                await self._handle_http()
        except Exception as e:
            if isinstance(e, ConnectionResetError):
                LOGGER.info(CONNECTION_CLOSED_MSG.format(url=pr.abs_url))
            else:
                LOGGER.exception(e)
                asyncio.get_event_loop().stop()
            if self.context_token is not None:
                self.connection.reset(self.context_token)

    async def _handle_http(self) -> None:
        """
        Send the HTTP request and then forward subsequent HTTP requests.
        """
        conn = self.connection.get()
        LOGGER.debug(
            HANDLING_HTTP_REQUEST_MSG.format(method=conn.pr.method,
                                             url=conn.pr.abs_url))
        await conn.server.write_and_drain(conn.pr.raw)
        await asyncio.gather(conn.forward_to_client(self._spent_data),
                             conn.forward_to_server())

    async def _handle_https(self) -> None:
        """
        Handle an HTTPS connection by establishing an HTTP tunnel.
        """
        conn = self.connection.get()
        hostname = conn.pr.hostname
        LOGGER.debug(HANDLING_HTTPS_CONNECTION_MSG.format(url=hostname))
        rsc = conn.pr.restriction
        if rsc:
            if self._spent_data[rsc.initiator] >= rsc.data_limit:
                await conn.reset()
                return
        await conn.client.write_and_drain(CONNECTION_ESTABLISHED_HTTP_MSG)
        LOGGER.debug(CONNECTION_ESTABLISHED_MSG.format(url=conn.pr.abs_url))
        await asyncio.gather(conn.forward_to_server(),
                             conn.forward_to_client(self._spent_data))
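
A hypothetical entry point; the config shape is inferred from __init__, which
iterates the "limited" and "black-list" keys:

if __name__ == "__main__":
    server = ProxyServer(port=8080, block_images=False,
                         cfg={"limited": [], "black-list": []})
    asyncio.run(server.run())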
Example #21
def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:
    """Return the element at the top of a trace stack."""
    trace_stack = trace_stack_var.get()
    return trace_stack[-1] if trace_stack else None
Example #22
from contextvars import ContextVar, copy_context

var = ContextVar('var')
var.set('spam')


def main():
    # 'var' was set to 'spam' before
    # calling 'copy_context()' and 'ctx.run(main)', so:
    # var.get() == ctx[var] == 'spam'

    var.set('ham')
    print('inside:', var.get())

    # Now, after setting 'var' to 'ham':
    # var.get() == ctx[var] == 'ham'

ctx = copy_context()

# Any changes that the 'main' function makes to 'var'
# will be contained in 'ctx'.
ctx.run(main)
print('outside:', var.get())


def main1():
    var.set('ham')
    print('inside2:', var.get())

main1()
print('outside2:', var.get())

# The 'main()' function was run in the 'ctx' context,
# so changes to 'var' are contained in it:
# ctx[var] == 'ham'

# However, outside of 'ctx', 'var' is still set to 'spam':
# var.get() == 'spam'
Example #23
class Database(LimitInstances):
    """Represents a database."""

    __instances__: dict[str, Database]

    def __init__(self, name: str):
        self.name = name
        self.user: t.Optional[str] = None
        self.url: t.Optional[str] = None

        self.pool: t.Optional[asyncpg.Pool] = None

        self.type = types
        self.schemas: t.Set[Schema] = set()

        self._mock = False
        self._prepared = False
        self._tracking = ContextVar(f"stmt_tracking:{name}")

    @classmethod
    def connect(cls, name: str, user: str, password: str, *, host: str = "localhost", port: int = 5432) -> Database:
        """Establish the connection URL and name for the database, returning the instance representing it."""
        if len(cls.__instances__) == 1:
            db = cls.__instances__["__default__"]
            cls.__instances__[name] = db
            db.name = name
        else:
            db = Database(name)
        db.user = user
        db.url = f"postgres://{user}:{password}@{host}:{port}/{name}"
        return db

    @property
    def public_schema(self):
        return self.Schema("public")

    def __call__(self, name: str) -> Database:
        """Return the instance representing the given database name."""
        return Database(name)

    def __str__(self):
        """Return the URL representation of the given database instance, if set."""
        return self.url or self.name

    def __repr__(self):
        status = " disabled" if self._mock else ""
        if self.user:
            return f"<Database '{self.name}' user='******'{status}>"
        else:
            return f"<Database '{self.name}'{status}>"

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other: t.Any):
        if isinstance(other, Database):
            return str(self) == str(other)
        return False

    def __getitem__(self, name: str) -> Database:
        """Retrieve an existing database with the given name."""
        return self.__instances__[name]

    def __delitem__(self, name: str):
        """Delete a database instance by it's name."""
        del self.__instances__[name]

    @classmethod
    def get_default(cls):
        return cls.__instances__["__default__"]

    async def create_pool(self):
        """Create the asyncpg connection pool for this database connection to use."""
        if self.pool:
            await self.pool.close()
        if not self.url:
            raise DBError("Please define a connection with Database.connect.")
        self.pool = await asyncpg.create_pool(self.url, init=self._enable_json)  # pragma: no cover

    @staticmethod
    async def _enable_json(conn: asyncpg.Connection):  # pragma: no cover
        await conn.set_type_codec("jsonb", encoder=json.dumps, decoder=json.loads, schema="pg_catalog")
        await conn.set_type_codec("json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog")

    async def prepare(self):
        """Prepare all child objects for this database."""
        for schema in self.schemas:
            await schema.prepare()
        self._prepared = True

    def disable_execution(self):
        """Return generated SQL without executing when Database.execute is used."""
        self._mock = True

    def enable_execution(self):
        """Sets Database.execute to it's normal execution behaviour."""
        self._mock = False

    @contextlib.contextmanager
    def stmt_tracking(self):
        """Collects raw executed statements until exit when execution is disabled."""
        ctx_token = self._tracking.set([])
        try:
            yield self
        finally:
            self._tracking.reset(ctx_token)

    async def close(self):  # pragma: no cover
        """Close the asyncpg connection pool for this database."""
        if self.pool:
            await self.pool.close()

    async def execute(self, sql: str, *args, timeout: t.Optional[float] = None) -> t.Union[str, tuple[str, t.Any]]:
        """Execute an SQL statement."""
        if self._mock:
            try:
                stmt_list = self._tracking.get()
                stmt_list.append((sql, args))
            except LookupError:
                pass
            if not args:
                return sql
            else:
                return sql, *args

        if not self.pool:  # pragma: no cover
            await self.create_pool()
        return await self.pool.execute(sql, *args, timeout=timeout)  # pragma: no cover

    def Schema(self, name: str) -> Schema:
        """Return a bound Schema for this database."""
        s = Schema(name, self)
        self.schemas.add(s)
        return s

    def Table(self, name: str) -> Table:
        """Return a bound Table for the public schema on this database."""
        return Table(name, self)
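
A sketch of statement tracking with execution disabled, so generated SQL is
collected and returned instead of hitting a live server (connection parameters
are placeholders):

import asyncio

db = Database.connect("mydb", "user", "secret")
db.disable_execution()

async def show_sql():
    with db.stmt_tracking():
        stmt = await db.execute("SELECT * FROM t WHERE id = $1", 42)
        print(stmt)  # -> ('SELECT * FROM t WHERE id = $1', 42)

asyncio.run(show_sql())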
Example #24
class Kanata(BaseDispatcher):
    "彼方."

    always = True  # for compatibility with the refactored bcc.

    signature_list: List[Union[NormalMatch, PatternReceiver]]
    stop_exec_if_fail: bool = True

    parsed_items: ContextVar[Dict[str, MessageChain]]

    def __init__(self,
                 signature_list: List[Union[NormalMatch, PatternReceiver]],
                 stop_exec_if_fail: bool = True) -> None:
        """该魔法方法用于实例化该参数解析器.

        Args:
            signature_list (List[Union[NormalMatch, PatternReceiver]]): 匹配标识链
            stop_exec_if_fail (bool, optional): 是否在无可用匹配时停止监听器执行. Defaults to True.
        """
        self.signature_list = signature_list
        self.stop_exec_if_fail = stop_exec_if_fail
        self.parsed_items = ContextVar("kanata_parsed_items")

    @staticmethod
    def detect_index(
        signature_chain: Tuple[Union[NormalMatch, PatternReceiver]],
        message_chain: MessageChain
    ) -> Optional[Dict[str, Tuple[MessageIndex, MessageIndex]]]:
        merged_chain = merge_signature_chain(signature_chain)
        message_chain = message_chain.asMerged()

        reached_message_index: MessageIndex = (0, None)
        # [0] => real_index
        # [1] => text_index(optional)

        start_index: MessageIndex = (0, None)

        match_result: Dict[Arguments, Tuple[MessageIndex,  # start(include)
                                            MessageIndex  # stop(exclude)
                                            ]] = {}

        signature_iterable = InsertGenerator(enumerate(merged_chain))
        latest_index = None
        matching_recevier: Optional[Arguments] = None

        for signature_index, signature in signature_iterable:
            if isinstance(signature, (Arguments, PatternReceiver)):
                if matching_recevier:  # one receiver has already been selected...
                    if isinstance(signature, Arguments):
                        if latest_index == signature_index:
                            matching_recevier.content.extend(signature.content)
                            continue
                        else:
                            raise TypeError(
                                "a unexpected case: match conflict")
                    if isinstance(signature, PatternReceiver):
                        matching_recevier.content.append(signature)
                        continue
                else:
                    if isinstance(signature, PatternReceiver):
                        signature = Arguments([signature])
                matching_recevier = signature
                start_index = reached_message_index
            elif isinstance(signature, NormalMatch):
                if not matching_recevier:
                    # if no arguments need matching, match the FullMatch from the current position (reached_message_index).
                    current_chain = message_chain.subchain(
                        slice(reached_message_index, None, None))
                    if not current_chain.__root__:  # index out of range
                        return
                    if not isinstance(current_chain.__root__[0], Plain):
                        # the first element after slicing is **not** Plain.
                        return
                    re_match_result = re.match(signature.operator(),
                                               current_chain.__root__[0].text)
                    if not re_match_result:
                        # no match
                        return
                    # advance the current progress.
                    plain_text_length = len(current_chain.__root__[0].text)
                    pattern_length = re_match_result.end(
                    ) - re_match_result.start()
                    if (pattern_length + 1) > plain_text_length:
                        # do not advance text_index; advance element_index instead
                        reached_message_index = (reached_message_index[0] + 1,
                                                 None)
                    else:
                        # advance the progress to just past the matched region.
                        reached_message_index = (
                            reached_message_index[0],
                            origin_or_zero(reached_message_index[1]) +
                            re_match_result.start() + pattern_length)
                else:
                    # arguments need matching (greed decides whether to search from the end backwards)
                    greed = matching_recevier.isGreed
                    for element_index, element in \
                            enumerate(message_chain.subchain(slice(reached_message_index, None, None)).__root__):
                        if isinstance(element, Plain):
                            current_text: str = element.text
                            # apply the greediness choice
                            text_find_result_list = list(
                                re.finditer(signature.operator(),
                                            current_text))
                            if not text_find_result_list:
                                continue
                            text_find_result = text_find_result_list[-int(greed)]
                            if not text_find_result:
                                continue
                            text_find_index = text_find_result.start()

                            # Found a match: advance the position, record the stop index for this receiver, and reset.
                            stop_index = (
                                reached_message_index[0] + element_index +
                                int(element_index == 0),
                                origin_or_zero(reached_message_index[1]) +
                                text_find_index)
                            match_result[matching_recevier] = (
                                copy.copy(start_index), stop_index)

                            start_index = (0, None)
                            matching_recevier = None

                            pattern_length = (text_find_result.end() -
                                              text_find_result.start())
                            if current_text == text_find_result.string[slice(
                                    *text_find_result.span())]:
                                # Advancing text_index here would overrun the element,
                                # so advance element_index instead.
                                reached_message_index = (
                                    reached_message_index[0] + element_index +
                                    int(element_index != 0), None)
                            else:
                                reached_message_index = (
                                    reached_message_index[0] + element_index,
                                    origin_or_zero(reached_message_index[1]) +
                                    text_find_index + pattern_length)
                            break
                    else:
                        # searched the whole chain and found no match
                        return
            latest_index = signature_index
        else:
            if matching_recevier:  # reached the end, but there is still work to do
                # Compute the end coordinates.
                text_index = None

                latest_element = message_chain.__root__[-1]
                if isinstance(latest_element, Plain):
                    text_index = len(latest_element.text)

                stop_index = (len(message_chain.__root__), text_index)
                match_result[matching_recevier] = (start_index, stop_index)
        return match_result

    @staticmethod
    def detect_and_mapping(
        signature_chain: Tuple[Union[NormalMatch, PatternReceiver]],
        message_chain: MessageChain
    ) -> Optional[Dict[Arguments, MessageChain]]:
        match_result = Kanata.detect_index(signature_chain, message_chain)
        if match_result is not None:
            return {
                k: message_chain[v[0]:(v[1][0], (
                    v[1][1] - (origin_or_zero(v[0][1]) if any([
                        v[0][0] + 1 == v[1][0], v[0][0] == v[1][0], v[0][0] -
                        1 == v[1][0]
                    ]) else 0)) if v[1][1] is not None else None)]
                for k, v in match_result.items()
            }

    @staticmethod
    def allocation(
        mapping: Dict[Arguments, MessageChain]
    ) -> Optional[Dict[str, MessageChain]]:
        if mapping is None:
            return None
        result = {}
        for argument_set, message_chain in mapping.items():
            length = len(argument_set.content)
            for index, receiver in enumerate(argument_set.content):
                if receiver.name in result:
                    raise ConflictItem(
                        '{0} is defined repeatedly'.format(receiver))
                if isinstance(receiver, RequireParam):
                    if not message_chain.__root__:
                        return
                    result[receiver.name] = message_chain
                elif isinstance(receiver, OptionalParam):
                    if not message_chain.__root__:
                        result[receiver.name] = None
                    else:
                        result[receiver.name] = message_chain
                break  # length matching is not implemented yet...
        return result

    @lru_cache(None)
    def catch_argument_names(self) -> List[str]:
        return [
            i.name for i in self.signature_list
            if isinstance(i, PatternReceiver)
        ]

    async def catch(self, interface: DispatcherInterface):
        # Because of how Dispatcher works, use yield here (self.parsed_items then gets cleaned up automatically).
        token = None
        if self.parsed_items.get(None) is None:
            message_chain = (await interface.execute_with(
                "__kanata_messagechain_origin__", MessageChain,
                None)).exclude(Source, Quote, Xml, Json, App, Poke)
            mapping_result = self.detect_and_mapping(self.signature_list,
                                                     message_chain)
            if mapping_result is not None:
                token = self.parsed_items.set(self.allocation(mapping_result))
            else:
                if self.stop_exec_if_fail:
                    raise ExecutionStop()

        _i = random.random()
        result = self.parsed_items.get({}).get(interface.name, _i)
        if result is _i:
            yield  # skip (note: the Executor should guard against unpredictable behavior of defaults)
        else:
            yield Force(result)
        if token is not None:
            self.parsed_items.reset(token)
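
A hedged usage sketch for the Kanata dispatcher above: the signature chain mixes literal matchers (NormalMatch subclasses such as FullMatch) with argument receivers, and catch() injects each captured sub-chain by parameter name. The import paths, the Broadcast instance `bcc`, and the GroupMessage event are assumptions that vary across versions.

from graia.application.message.parser.kanata import Kanata
from graia.application.message.parser.signature import FullMatch, RequireParam

# "!echo <content>": FullMatch consumes the literal prefix and
# RequireParam captures the rest of the chain as "content".
@bcc.receiver(GroupMessage,
              dispatchers=[Kanata([FullMatch("!echo "),
                                   RequireParam(name="content")])])
async def echo_handler(content: MessageChain):
    ...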
Example No. 25
class Database:
    SUPPORTED_BACKENDS = {
        "postgresql": "databases.backends.postgres:PostgresBackend",
        "mysql": "databases.backends.mysql:MySQLBackend",
        "sqlite": "databases.backends.sqlite:SQLiteBackend",
    }

    def __init__(self,
                 url: typing.Union[str, "DatabaseURL"],
                 *,
                 force_rollback: bool = False):
        self._url = DatabaseURL(url)
        self._force_rollback = force_rollback
        self.is_connected = False

        backend_str = self.SUPPORTED_BACKENDS[self._url.dialect]
        backend_cls = import_from_string(backend_str)
        assert issubclass(backend_cls, DatabaseBackend)
        self._backend = backend_cls(self._url)

        # Connections are stored as task-local state.
        self._connection_context = ContextVar(
            "connection_context")  # type: ContextVar

        # When `force_rollback=True` is used, we use a single global
        # connection, within a transaction that always rolls back.
        self._global_connection = None  # type: typing.Optional[Connection]
        self._global_transaction = None  # type: typing.Optional[Transaction]

        if self._force_rollback:
            self._global_connection = Connection(self._backend)
            self._global_transaction = self._global_connection.transaction(
                force_rollback=True)

    async def connect(self) -> None:
        """
        Establish the connection pool.
        """
        assert not self.is_connected, "Already connected."

        await self._backend.connect()
        self.is_connected = True

        if self._force_rollback:
            assert self._global_transaction is not None
            await self._global_transaction.__aenter__()

    async def disconnect(self) -> None:
        """
        Close all connections in the connection pool.
        """
        assert self.is_connected, "Already disconnected."

        if self._force_rollback:
            assert self._global_transaction is not None
            await self._global_transaction.__aexit__()

        await self._backend.disconnect()
        self.is_connected = False

    async def __aenter__(self) -> "Database":
        await self.connect()
        return self

    async def __aexit__(
        self,
        exc_type: typing.Type[BaseException] = None,
        exc_value: BaseException = None,
        traceback: TracebackType = None,
    ) -> None:
        await self.disconnect()

    async def fetch_all(self, query: ClauseElement) -> typing.List[RowProxy]:
        async with self.connection() as connection:
            return await connection.fetch_all(query=query)

    async def fetch_one(self, query: ClauseElement) -> RowProxy:
        async with self.connection() as connection:
            return await connection.fetch_one(query=query)

    async def execute(self,
                      query: ClauseElement,
                      values: dict = None) -> typing.Any:
        async with self.connection() as connection:
            return await connection.execute(query=query, values=values)

    async def execute_many(self, query: ClauseElement, values: list) -> None:
        async with self.connection() as connection:
            return await connection.execute_many(query=query, values=values)

    async def iterate(
            self,
            query: ClauseElement) -> typing.AsyncGenerator[RowProxy, None]:
        async with self.connection() as connection:
            async for record in connection.iterate(query):
                yield record

    def connection(self) -> "Connection":
        if self._global_connection is not None:
            return self._global_connection

        try:
            return self._connection_context.get()
        except LookupError:
            connection = Connection(self._backend)
            self._connection_context.set(connection)
            return connection

    def transaction(self, *, force_rollback: bool = False) -> "Transaction":
        return self.connection().transaction(force_rollback=force_rollback)
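
A minimal usage sketch for the Database class above, assuming the SQLAlchemy-core query style it expects; the table definition and file name are hypothetical.

import asyncio
import sqlalchemy

metadata = sqlalchemy.MetaData()
notes = sqlalchemy.Table(
    "notes", metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("text", sqlalchemy.String(length=100)),
)

async def main() -> None:
    # __aenter__/__aexit__ drive connect()/disconnect(); the active
    # connection stays task-local via the ContextVar in connection().
    async with Database("sqlite:///example.db") as database:
        await database.execute(query=notes.insert(), values={"text": "hi"})
        rows = await database.fetch_all(query=notes.select())

asyncio.run(main())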
Example No. 26
class Broker(dramatiq.Broker):  # pylint: disable=abstract-method
    """Dramatiq broker using sqlalchemy to process messages."""
    def __init__(self, middleware=None, settings=None):
        super().__init__(middleware=middleware)

        self.__settings = settings or Settings()
        self.__db_engine = db_engine(self.__settings)
        self.__shared_session = ContextVar("shared_session")
        self.__broker_id = uuid.uuid4()
        self.__queue_events = defaultdict(Event)

        # We have some actors using this, so it's always enabled.
        self.add_middleware(CurrentMessage())

        # Enable special handling of actors with 'scheduled=True' in options.
        self.add_middleware(
            SchedulerMiddleware(self.__settings, self.__db_engine))

        self.add_middleware(LocalNotifyMiddleware())

        # This middleware is postgres-specific, so add it only for postgres engines.
        if "postgresql" in str(self.__db_engine.url):
            self.add_middleware(PostgresNotifyMiddleware(self.__db_engine))

    def set_session(self, session):
        """Set an sqlalchemy session for use with the broker.

        A session should be set during handling of any HTTP requests.
        It will ensure that enqueues take place in the same transaction
        as other changes made during handling of that request.

        In other contexts, e.g. one dramatiq actor invoking another,
        the session can be safely left unset and the broker will manage
        its own session as needed.
        """
        self.__shared_session.set(session)

    @property
    def session(self):
        return self.__shared_session.get(None)

    def notify(self):
        """Notify all consumers that something might have changed.

        Consumers are called in a loop and they will sleep between iterations.
        Calling this method will wake any sleeping consumers so that new messages
        can be found earlier.
        """
        queues = self.get_declared_queues().union(
            self.get_declared_delay_queues())
        for queue_name in queues:
            self.__queue_events[queue_name].set()

    def declare_queue(self, queue_name):
        if queue_name not in self.queues:
            self.emit_before("declare_queue", queue_name)
            self.queues[queue_name] = None
            self.emit_after("declare_queue", queue_name)

            delayed_name = dq_name(queue_name)
            self.queues[delayed_name] = None
            self.delay_queues.add(delayed_name)
            self.emit_after("declare_delay_queue", delayed_name)

    def consume(self, queue_name, prefetch=1, timeout=30000):
        consumer_id = "%s-%s" % (queue_name, self.__broker_id)

        # We need one (arbitrarily selected) consumer to act as the
        # "master" which will take on additional maintenance duties.
        # We'll have one consumer per queue and it would be wasteful
        # to let all of them do this.
        master = queue_name == list(self.queues.keys())[0]

        return Consumer(
            queue_name,
            db_engine=self.__db_engine,
            consumer_id=consumer_id,
            prefetch=prefetch,
            master=master,
            queue_event=self.__queue_events[queue_name],
            settings=self.__settings,
        )

    def enqueue_using_session(self, db, message, delay=None):
        # Given a dramatiq message, saves it to the queue in the DB.
        queue_name = message.queue_name

        if delay is not None:
            queue_name = dq_name(queue_name)
            message.options["eta"] = current_millis() + delay

        db_message = DramatiqMessage(id=message.message_id,
                                     actor=message.actor_name,
                                     queue=queue_name)

        message_dict = message.asdict()

        # Drop these so we're not storing them in two places.
        del message_dict["message_id"]
        del message_dict["queue_name"]
        del message_dict["actor_name"]

        db_message.body = message_dict

        # Use merge rather than add since the message may already exist;
        # for instance this happens in case of retry.
        db_message = db.merge(db_message)

        # Explicitly wipe out the consumer, since if we've updated an existing
        # message it'll have to be consumed again.
        db_message.consumer_id = None

    def enqueue(self, message, *, delay=None):
        self.emit_before("enqueue", message, delay)

        if db := self.session:
            # We have a shared session, e.g. we're in an http request handler.
            # We reuse the session and don't commit ourselves.
            self.enqueue_using_session(db, message, delay)
            self.emit_after("enqueue", message, delay)
            return message

        # We don't have a shared session e.g.
        # - an automated test not using a real app
        # - testing from python CLI
        # - one actor invoking another
        # Then we use our own short-lived session and commit.
        db = Session(bind=self.__db_engine)
        try:
            self.enqueue_using_session(db, message, delay)
            db.commit()
            self.emit_after("enqueue", message, delay)
        finally:
            db.close()

        return message
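
A hedged sketch of the shared-session flow that set_session() describes: during request handling, enqueue() reuses the caller's session, so the message row commits (or rolls back) together with the request's other changes. `broker`, `my_actor`, and `session_factory` are hypothetical names.

def handle_request(broker, my_actor, session_factory):
    db = session_factory()        # sqlalchemy Session bound to the same engine
    try:
        broker.set_session(db)    # enqueue() will reuse this session...
        my_actor.send(123)        # ...so the message INSERT joins our transaction
        db.commit()               # the message becomes visible atomically
    finally:
        broker.set_session(None)  # the session property falls back to None
        db.close()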
Example No. 27
class Database:
    SUPPORTED_BACKENDS = {
        "postgresql": "databases.backends.postgres:PostgresBackend",
        "postgresql+aiopg": "databases.backends.aiopg:AiopgBackend",
        "postgres": "databases.backends.postgres:PostgresBackend",
        "mysql": "databases.backends.mysql:MySQLBackend",
        "sqlite": "databases.backends.sqlite:SQLiteBackend",
    }

    def __init__(
        self,
        url: typing.Union[str, "DatabaseURL"],
        *,
        force_rollback: bool = False,
        **options: typing.Any,
    ):
        self.url = DatabaseURL(url)
        self.options = options
        self.is_connected = False

        self._force_rollback = force_rollback

        backend_str = self.SUPPORTED_BACKENDS[self.url.scheme]
        backend_cls = import_from_string(backend_str)
        assert issubclass(backend_cls, DatabaseBackend)
        self._backend = backend_cls(self.url, **self.options)

        # Connections are stored as task-local state.
        self._connection_context = ContextVar(
            "connection_context")  # type: ContextVar

        # When `force_rollback=True` is used, we use a single global
        # connection, within a transaction that always rolls back.
        self._global_connection = None  # type: typing.Optional[Connection]
        self._global_transaction = None  # type: typing.Optional[Transaction]

    async def connect(self) -> None:
        """
        Establish the connection pool.
        """
        assert not self.is_connected, "Already connected."

        await self._backend.connect()
        logger.info("Connected to database %s",
                    self.url.obscure_password,
                    extra=CONNECT_EXTRA)
        self.is_connected = True

        if self._force_rollback:
            assert self._global_connection is None
            assert self._global_transaction is None

            self._global_connection = Connection(self._backend)
            self._global_transaction = self._global_connection.transaction(
                force_rollback=True)

            await self._global_transaction.__aenter__()

    async def disconnect(self) -> None:
        """
        Close all connections in the connection pool.
        """
        assert self.is_connected, "Already disconnected."

        if self._force_rollback:
            assert self._global_connection is not None
            assert self._global_transaction is not None

            await self._global_transaction.__aexit__()

            self._global_transaction = None
            self._global_connection = None

        await self._backend.disconnect()
        logger.info(
            "Disconnected from database %s",
            self.url.obscure_password,
            extra=DISCONNECT_EXTRA,
        )
        self.is_connected = False

    async def __aenter__(self) -> "Database":
        await self.connect()
        return self

    async def __aexit__(
        self,
        exc_type: typing.Type[BaseException] = None,
        exc_value: BaseException = None,
        traceback: TracebackType = None,
    ) -> None:
        await self.disconnect()

    async def fetch_all(self,
                        query: typing.Union[ClauseElement, str],
                        values: dict = None) -> typing.List[typing.Mapping]:
        async with self.connection() as connection:
            return await connection.fetch_all(query, values)

    async def fetch_one(
            self,
            query: typing.Union[ClauseElement, str],
            values: dict = None) -> typing.Optional[typing.Mapping]:
        async with self.connection() as connection:
            return await connection.fetch_one(query, values)

    async def fetch_val(
        self,
        query: typing.Union[ClauseElement, str],
        values: dict = None,
        column: typing.Any = 0,
    ) -> typing.Any:
        async with self.connection() as connection:
            return await connection.fetch_val(query, values, column=column)

    async def execute(self,
                      query: typing.Union[ClauseElement, str],
                      values: dict = None) -> typing.Any:
        async with self.connection() as connection:
            return await connection.execute(query, values)

    async def execute_many(self, query: typing.Union[ClauseElement, str],
                           values: list) -> None:
        async with self.connection() as connection:
            return await connection.execute_many(query, values)

    async def iterate(self,
                      query: typing.Union[ClauseElement, str],
                      values: dict = None
                      ) -> typing.AsyncGenerator[typing.Mapping, None]:
        async with self.connection() as connection:
            async for record in connection.iterate(query, values):
                yield record

    def connection(self) -> "Connection":
        if self._global_connection is not None:
            return self._global_connection

        try:
            return self._connection_context.get()
        except LookupError:
            connection = Connection(self._backend)
            self._connection_context.set(connection)
            return connection

    def transaction(self,
                    *,
                    force_rollback: bool = False,
                    **kwargs: typing.Any) -> "Transaction":
        return Transaction(self.connection,
                           force_rollback=force_rollback,
                           **kwargs)

    @contextlib.contextmanager
    def force_rollback(self) -> typing.Iterator[None]:
        initial = self._force_rollback
        self._force_rollback = True
        try:
            yield
        finally:
            self._force_rollback = initial
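
A minimal test-suite sketch for the force_rollback() context manager above: flipping the flag before connect() makes the whole session run inside a single transaction that is rolled back on disconnect(). The DSN and SQL are hypothetical.

database = Database("postgresql://localhost/test_db")

async def run_isolated_test() -> None:
    with database.force_rollback():
        async with database:  # connect() sees _force_rollback=True
            await database.execute(
                "INSERT INTO notes (text) VALUES ('temporary')")
    # disconnect() exited the global transaction, rolling everything back.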
Example No. 28
class AsyncPostgresSQL:
    __slots__ = ('_ctxvar', '_pool', '_pool_kwargs', '_listener')

    def __init__(self, dsn=None, min_size=10, max_size=10, on_init_conn=None):
        """
        Define settings to establish a connection to a PostgreSQL server.

        The connection parameters may be specified either as a connection
        URI in *dsn*, or as specific keyword arguments, or both.
        If both *dsn* and keyword arguments are specified, the latter
        override the corresponding values parsed from the connection URI.

        :param dsn:
            Connection arguments specified as a single string in the
            `libpq connection URI format
            <https://www.postgresql.org/docs/current/libpq-connect.html#id-1.7.3.8.3.6>`_:
            ``postgres://user:password@host:port/database?option=value``.

        """
        if not on_init_conn:
            on_init_conn = _init_connection

        self._pool_kwargs = dict(dsn=dsn,
                                 min_size=min_size,
                                 max_size=max_size,
                                 init=on_init_conn)
        self._ctxvar = ContextVar('connection')

        self._listener = None

    def transaction(self, *d_args, renew=False, autocommit=False):
        """Decorate the function to access datasbase.

        :param renew: Force the function with a new connection.
        :param autocommit: autocommit
        """
        def _sqlblk_decorator(func):
            async def _sqlblock_wrapper(*args, **kwargs):
                ctxvar = self._ctxvar
                pool = self._pool
                if pool is None:
                    raise ValueError('connection pool has not been started')

                block = ctxvar.get(None)
                if block is None or renew:
                    conn = None
                    try:
                        conn = await pool.acquire()
                        if conn is None:
                            conn_dsn = self._pool_kwargs.get("dsn")
                            errmsg = (
                                f"unavailable connection '{conn_dsn}' "
                                f"to invoke sql block '{func.__module__}.{func.__name__}'"
                            )
                            raise UnavailableConnectionException(errmsg)

                        block = SQLBlock(conn, autocommit=autocommit)
                        return await _scoped_invoke(ctxvar, block, conn,
                                                    autocommit, func, args,
                                                    kwargs)
                    finally:
                        if pool and conn:
                            await pool.release(conn)
                else:
                    conn = block._conn
                    childBlock = SQLBlock(conn,
                                          parent=block,
                                          autocommit=autocommit)

                    return await _scoped_invoke(ctxvar, childBlock, conn,
                                                autocommit, func, args, kwargs)

            return update_func_wrapper(_sqlblock_wrapper, func)

        if len(d_args) > 0 and iscoroutinefunction(d_args[0]):
            # no argument decorator
            return _sqlblk_decorator(d_args[0])
        else:
            return lambda f: _sqlblk_decorator(f)

    async def __aenter__(self):
        """ startup the connection pool """
        self._pool = LazyConnectionPool(**self._pool_kwargs)
        # self._pool = create_pool(**self._pool_kwargs)
        await self._pool.__aenter__()

        self._listener = Listener(self._pool)

        return self

    async def __aexit__(self, etyp, exc_val, tb):
        """ gracefull shutdown the connection pool """

        if self._listener is not None:
            await self._listener.close()
            self._listener = None

        await self._pool.__aexit__()
        self._pool = None

    def sql(self, *sqltexts, **params):
        if not params:
            params = _get_ctx_frame(1).f_locals

        sqlblock = self._sqlblock
        for sqltext in sqltexts:
            sqlblock.join(sqltext, vars=params)

        return self

    def __lshift__(self, sqltext):
        self._sqlblock.join(sqltext, vars=_get_ctx_frame(1).f_locals)

        return self

    async def execute(self, **params):
        return await self._sqlblock.fetch(**params)

    def __await__(self):
        return self._sqlblock.fetch().__await__()

    async def fetch_first(self, **params):
        return await self._sqlblock.fetch_first(**params)

    async def first(self, **params):
        return await self._sqlblock.fetch_first(**params)

    def __aiter__(self):
        return self._sqlblock.__aiter__()

    async def listen(self, channel):
        """ listen for Postgres notifications

        The returned value is the payload of notification

        :param str channel: Channel to listen on.
        """

        return await self._listener.get(channel)

    async def notify(self, channel, payload):
        # NOTIFY does not accept bind parameters, so use pg_notify() instead.
        await self._sqlblock._conn.execute("SELECT pg_notify($1, $2)", channel,
                                           payload)

    @property
    def _sqlblock(self) -> SQLBlock:
        """Get sqlblock in context"""
        sqlblock = self._ctxvar.get()
        return sqlblock
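
A hedged usage sketch for the transaction() decorator above; the DSN, the users table, and the exact SQL-templating rules of sql() are assumptions.

db = AsyncPostgresSQL(dsn="postgres://user:secret@localhost/appdb")

@db.transaction
async def list_users():
    db.sql("SELECT id, name FROM users")
    return [row async for row in db]  # __aiter__ streams rows from the block

async def main():
    async with db:  # __aenter__ starts the pool and the listener
        users = await list_users()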