Example #1
class RateLimitAggregator(AbstractContextManager):  # type: ignore
    """
    Runs the rate limits provided by the `rate_limit_params` configuration object.

    It runs the rate limits in the order described by `rate_limit_params`.
    """
    def __init__(self,
                 rate_limit_params: Sequence[RateLimitParameters]) -> None:
        self.rate_limit_params = rate_limit_params
        self.stack = ExitStack()

    def __enter__(self) -> RateLimitStatsContainer:
        stats = RateLimitStatsContainer()

        for rate_limit_param in self.rate_limit_params:
            child_stats = self.stack.enter_context(
                rate_limit(rate_limit_param))
            if child_stats:
                stats.add_stats(rate_limit_param.rate_limit_name, child_stats)

        return stats

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.stack.pop_all().close()
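
A standalone sketch of the same pattern may help: one ExitStack enters an arbitrary number of context managers in order and releases them together on exit. The MultiOpen class and the files it would receive are hypothetical, not part of the project the example comes from.

from contextlib import AbstractContextManager, ExitStack

class MultiOpen(AbstractContextManager):
    """Illustrative only: enter several files as a single context manager."""

    def __init__(self, *paths):
        self.paths = paths
        self.stack = ExitStack()

    def __enter__(self):
        # Each file is registered on the stack as soon as it is opened.
        return [self.stack.enter_context(open(p)) for p in self.paths]

    def __exit__(self, exc_type, exc_val, exc_tb):
        # pop_all() transfers the callbacks to a fresh stack and close()
        # runs them, mirroring RateLimitAggregator.__exit__ above.
        self.stack.pop_all().close()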
Example #2
class GitHubApi:
    def __init__(self, auth_token=None, url_base=DEFAULT_URL_BASE):
        self._establish_connection = partial(Connection, url_base,
                                             auth_token=auth_token)
        self._connection = None
        self._exit_stack = ExitStack()

    def __enter__(self):
        self._connection = self._exit_stack.enter_context(
            self._establish_connection()
        )
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._connection = None
        # Actually close the connection; pop_all() alone would only transfer
        # the callbacks to a new stack without running them.
        self._exit_stack.close()
        self._exit_stack = None

    def get_repository_api(self, fullname):
        if not self._connection:
            raise RuntimeError(
                'GitHubApi should be used within context manager flow'
            )
        get_repo = self._connection.get(f'{self._connection.url_base}/repos/{fullname}')
        return RepositoryApi(self._connection, get_repo.json())
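
A hedged usage sketch for the class above; the token value and the repository name are placeholders, and Connection / DEFAULT_URL_BASE come from the surrounding project.

# Placeholder token and repository name.
with GitHubApi(auth_token='<token>') as gh:
    repo = gh.get_repository_api('octocat/Hello-World')
# Outside the with block the connection is gone, so get_repository_api()
# raises RuntimeError.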
Example #3
class RateLimitAggregator(AbstractContextManager):  # type: ignore
    """
    Runs the rate limits provided by the `rate_limit_params` configuration object.

    It runs the rate limits in the order described by `rate_limit_params`.
    """
    def __init__(self,
                 rate_limit_params: Sequence[RateLimitParameters]) -> None:
        self.rate_limit_params = rate_limit_params
        self.stack = ExitStack()

    def __enter__(self) -> RateLimitStatsContainer:
        stats = RateLimitStatsContainer()

        for rate_limit_param in self.rate_limit_params:
            try:
                child_stats = self.stack.enter_context(
                    rate_limit(rate_limit_param))
                if child_stats:
                    stats.add_stats(rate_limit_param.rate_limit_name,
                                    child_stats)
            except RateLimitExceeded as e:
                # If an exception occurs in one of the rate limiters, the __exit__ callbacks are not
                # called since the error happened in the __enter__ method and not in the context
                # block itself. In the case that one of the rate limiters caught a limit, we need
                # these exit functions to be called so we can roll back any limits that were set
                # earlier in the stack.
                self.__exit__(*sys.exc_info())
                _record_metrics(e, rate_limit_param)
                raise e

        return stats

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.stack.pop_all().close()
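
The comment in the except block describes the general problem of unwinding resources that were only partially acquired. The same idea can be expressed with ExitStack alone, as in this sketch (acquire_all and factories are hypothetical names):

from contextlib import ExitStack

def acquire_all(factories):
    """Enter every context manager produced by `factories`, or none at all."""
    with ExitStack() as stack:
        values = [stack.enter_context(factory()) for factory in factories]
        # If any factory raised, the with statement has already unwound the
        # contexts entered so far.  On success, transfer ownership to the
        # caller so the resources stay open.
        return stack.pop_all(), values

The caller is then responsible for closing the returned stack, typically with another with statement or an explicit close().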
Example #4
class atomically_replaced_file:
    def __init__(self, path, mode='w', buffering=-1):
        """Return a context manager supporting the atomic replacement of a file.

        The context manager yields an open file object that has been
        created in a mkdtemp-style temporary directory in the same
        directory as the path.  The temporary file will be renamed to
        the target path (atomically if the platform allows it) if
        there are no exceptions, and the temporary directory will
        always be removed.  Calling cancel() will prevent the
        replacement.

        The file object will have a name attribute containing the
        file's path, and the mode and buffering arguments will be
        handled exactly as with open().  The resulting permissions
        will also match those produced by open().

        E.g.::

          with atomically_replaced_file('foo.txt', 'w') as f:
              f.write('hello jack.')

        """
        assert 'w' in mode
        self.path = path
        self.mode = mode
        self.buffering = buffering
        self.canceled = False
        self.tmp_path = None
        self.cleanup = ExitStack()

    def __enter__(self):
        with self.cleanup:
            parent, name = os.path.split(self.path)
            tmpdir = self.cleanup.enter_context(
                temp_dir(dir=parent, prefix=name + b'-'))
            self.tmp_path = tmpdir + b'/pending'
            f = open(self.tmp_path, mode=self.mode, buffering=self.buffering)
            f = self.cleanup.enter_context(f)
            self.cleanup = self.cleanup.pop_all()
            return f

    def __exit__(self, exc_type, exc_value, traceback):
        with self.cleanup:
            if not (self.canceled or exc_type):
                os.rename(self.tmp_path, self.path)

    def cancel(self):
        self.canceled = True
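
A hedged usage sketch for the cancel() escape hatch described in the docstring. The byte-string path is a placeholder; bytes are assumed because the code above builds the temporary name with b'-' and b'/pending'. Note that cancel() has to be called before the with block ends.

arf = atomically_replaced_file(b'foo.txt', mode='wb')
with arf as f:
    f.write(b'new contents')
    arf.cancel()   # decide not to replace: the original foo.txt is left untouched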
Example #5
class Scope:
    """
    A context manager that allows registering error and exit callbacks.
    """

    _thread_locals = threading.local()

    @frozen
    class _ExitHandler:
        callback: Callable[[], Any]
        ignore_errors: bool = True

        def __exit__(self, exc_type, exc_value, exc_traceback):
            try:
                self.callback()
            except Exception:
                if not self.ignore_errors:
                    raise

    @frozen
    class _ErrorHandler(_ExitHandler):
        def __exit__(self, exc_type, exc_value, exc_traceback):
            if exc_type:
                return super().__exit__(exc_type=exc_type,
                                        exc_value=exc_value,
                                        exc_traceback=exc_traceback)

    def __init__(self):
        self._stack = ExitStack()
        self.enabled = True

    def on_error_do(self,
                    callback: Callable,
                    *args,
                    kwargs: Optional[Dict[str, Any]] = None,
                    ignore_errors: bool = False):
        """
        Registers a function to be called on scope exit because of an error.

        If ignore_errors is True, the errors from this function call
        will be ignored.
        """

        self._register_callback(self._ErrorHandler,
                                ignore_errors=ignore_errors,
                                callback=callback,
                                args=args,
                                kwargs=kwargs)

    def on_exit_do(self,
                   callback: Callable,
                   *args,
                   kwargs: Optional[Dict[str, Any]] = None,
                   ignore_errors: bool = False):
        """
        Registers a function to be called on scope exit.
        """

        self._register_callback(self._ExitHandler,
                                ignore_errors=ignore_errors,
                                callback=callback,
                                args=args,
                                kwargs=kwargs)

    def _register_callback(self,
                           handler_type,
                           callback: Callable,
                           args: Optional[Tuple[Any, ...]] = None,
                           kwargs: Optional[Dict[str, Any]] = None,
                           ignore_errors: bool = False):
        if args or kwargs:
            callback = partial(callback, *args, **(kwargs or {}))

        self._stack.push(handler_type(callback, ignore_errors=ignore_errors))

    def add(self, cm: ContextManager[T]) -> T:
        """
        Enters a context manager and adds it to the exit stack.

        Returns: cm.__enter__() result
        """

        return self._stack.enter_context(cm)

    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False

    def close(self):
        self.__exit__(None, None, None)

    def __enter__(self) -> Scope:
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.enabled:
            return

        self._stack.__exit__(exc_type, exc_value, exc_traceback)
        self._stack.pop_all()  # prevent issues on repetitive calls

    @classmethod
    def current(cls) -> Scope:
        return cls._thread_locals.current

    @contextmanager
    def as_current(self):
        previous = getattr(self._thread_locals, 'current', None)
        self._thread_locals.current = self
        try:
            yield
        finally:
            self._thread_locals.current = previous
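
A hedged usage sketch for the Scope API above; tempfile.TemporaryDirectory stands in for an arbitrary context manager and the print() callbacks for real cleanup work.

import tempfile

with Scope() as scope:
    # Entered immediately; removed when the scope exits.
    tmp_path = scope.add(tempfile.TemporaryDirectory())
    # Runs only if an exception escapes the scope.
    scope.on_error_do(print, 'failed while working in', tmp_path)
    # Runs on every exit, error or not.
    scope.on_exit_do(print, 'scope finished')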
Example #6
class State:
    def __init__(self):
        # Variables which manage state transitions.
        self._next = deque()
        self._debug_step = 0
        # Manage all resources so they get cleaned up whenever the state
        # machine exits for any reason.
        self.resources = ExitStack()

    def close(self):
        # Transfer all resources to a new ExitStack, and release them from
        # there.  That way, if .close() gets called more than once, only the
        # first call will release the resources, while subsequent ones will
        # no-op.
        self.resources.pop_all().close()

    def __enter__(self):
        return self

    def __exit__(self, *exception):
        self.close()
        # Don't suppress any exceptions.
        return False

    def __del__(self):
        self.close()

    def __iter__(self):
        return self

    # We can't pickle the resources ExitStack, so if there's anything
    # valuable in there, the subclass must override __getstate__() and
    # __setstate__() as appropriate.

    def __getstate__(self):
        return dict(
            state=[function.__name__ for function in self._next],
            debug_step=self._debug_step,
        )

    def __setstate__(self, state):
        self._next = deque()
        for name in state['state']:
            self._next.append(getattr(self, name))
        self._debug_step = state['debug_step']
        self.resources = ExitStack()

    def _pop(self):
        step = self._next.popleft()
        # step could be a partial or a method.
        name = getattr(step, 'func', step).__name__
        log.debug('-> [{:2}] {}'.format(self._debug_step, name))
        return step, name

    def __next__(self):
        try:
            step, name = self._pop()
            step()
            self._debug_step += 1
        except IndexError:
            # Do not chain the exception.
            self.close()
            raise StopIteration from None
        except:
            log.exception(
                'uncaught exception in state machine step: [{}] {}'.format(
                    self._debug_step, name))
            self.close()
            raise

    def run_thru(self, stop_after):
        """Partially run the state machine.

        Note that any resources maintained by this state machine are
        *not* automatically cleaned up when .run_thru() completes,
        unless an exception occurs, because execution can be continued.
        Call .close() explicitly to release the resources.

        :param stop_after: Name or step number of the method to run the state
            machine through.  In other words, the state machine runs until the
            specified step completes.  Step numbers begin at 0.
        """
        while True:
            try:
                step, name = self._pop()
            except (StopIteration, IndexError):
                # We're done.
                break
            try:
                step()
            except:
                self.close()
                raise
            try:
                if name == stop_after or self._debug_step == stop_after:
                    break
            finally:
                self._debug_step += 1

    def run_until(self, stop_before):
        """Partially run the state machine.

        Note that any resources maintained by this state machine are
        *not* automatically cleaned up when .run_until() completes,
        unless an exception occurs, because execution can be continued.
        Call .close() explicitly to release the resources.

        :param stop_before: Name or step number of the method to run the
            state machine up to.  The state machine runs until the specified
            step is reached; unlike `run_thru()`, that step is not run.  Step
            numbers begin at 0.
        """
        while True:
            try:
                step, name = self._pop()
            except (StopIteration, IndexError):
                # We're done.
                break
            if name == stop_before or self._debug_step == stop_before:
                # Stop executing, but not before we push the last state back
                # onto the deque.  Otherwise, resuming the state machine would
                # skip this step.
                self._next.appendleft(step)
                break
            try:
                step()
            except:
                self.close()
                raise
            self._debug_step += 1
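
A hedged sketch of how this state machine might be driven. The Greeter subclass and its steps are illustrative, and a module-level log object is created here because the class above expects one to exist.

import logging

log = logging.getLogger(__name__)   # name assumed by the State class above

class Greeter(State):
    def __init__(self):
        super().__init__()
        self._next.append(self._say_hello)
        self._next.append(self._say_goodbye)

    def _say_hello(self):
        print('hello')

    def _say_goodbye(self):
        print('goodbye')

machine = Greeter()
machine.run_until('_say_goodbye')   # runs _say_hello, stops before _say_goodbye
machine.run_thru('_say_goodbye')    # resumes and runs _say_goodbye
machine.close()                     # release anything held in machine.resources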
Example #7
class HLinkDB:
    def __init__(self, filename):
        self.closed = False
        self._cleanup = ExitStack()
        self._filename = filename
        self._pending_save = None
        # Map a "dev:ino" node to a list of paths associated with that node.
        self._node_paths = pickle_load(filename) or {}
        # Map a path to a "dev:ino" node (a reverse hard link index).
        self._path_node = {}
        for node, paths in self._node_paths.items():
            for path in paths:
                self._path_node[path] = node

    def prepare_save(self):
        """ Commit all of the relevant data to disk.  Do as much work
        as possible without actually making the changes visible."""
        if self._pending_save:
            raise Error('save of %r already in progress' % self._filename)
        with self._cleanup:
            if self._node_paths:
                dir, name = os.path.split(self._filename)
                self._pending_save = atomically_replaced_file(self._filename,
                                                              mode='wb',
                                                              buffering=65536)
                with self._cleanup.enter_context(self._pending_save) as f:
                    pickle.dump(self._node_paths, f, 2)
            else:  # No data
                self._cleanup.callback(lambda: unlink(self._filename))
            self._cleanup = self._cleanup.pop_all()

    def commit_save(self):
        self.closed = True
        if self._node_paths and not self._pending_save:
            raise Error('cannot commit save of %r; no save prepared' %
                        self._filename)
        self._cleanup.close()
        self._pending_save = None

    def abort_save(self):
        self.closed = True
        with self._cleanup:
            if self._pending_save:
                self._pending_save.cancel()
        self._pending_save = None

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.abort_save()

    def __del__(self):
        assert self.closed

    def add_path(self, path, dev, ino):
        # Assume path is new.
        node = b'%d:%d' % (dev, ino)
        self._path_node[path] = node
        link_paths = self._node_paths.get(node)
        if link_paths and path not in link_paths:
            link_paths.append(path)
        else:
            self._node_paths[node] = [path]

    def _del_node_path(self, node, path):
        link_paths = self._node_paths[node]
        link_paths.remove(path)
        if not link_paths:
            del self._node_paths[node]

    def change_path(self, path, new_dev, new_ino):
        prev_node = self._path_node.get(path)
        if prev_node:
            self._del_node_path(prev_node, path)
        self.add_path(path, new_dev, new_ino)

    def del_path(self, path):
        # Path may not be in db (if updating a pre-hardlink support index).
        node = self._path_node.get(path)
        if node:
            self._del_node_path(node, path)
            del self._path_node[path]

    def node_paths(self, dev, ino):
        node = b'%d:%d' % (dev, ino)
        return self._node_paths[node]
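
A hedged usage sketch of the prepare/commit flow above. The index path and the dev/inode numbers are placeholders, and pickle_load() is assumed to return a falsy value when the file does not exist yet (which is what the `or {}` in __init__ suggests).

with HLinkDB(b'hlink-index') as db:
    db.add_path(b'/data/a', 2049, 12345)
    db.add_path(b'/data/b', 2049, 12345)     # same dev:ino pair -> hard links
    print(db.node_paths(2049, 12345))        # [b'/data/a', b'/data/b']
    db.prepare_save()                        # written to a temp file, not yet visible
    db.commit_save()                         # atomically replaces the index file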
Example #8
class State:
    def __init__(self):
        # Variables which manage state transitions.
        self._next = deque()
        self._debug_step = 1
        # Manage all resources so they get cleaned up whenever the state
        # machine exits for any reason.
        self.resources = ExitStack()

    def close(self):
        # Transfer all resources to a new ExitStack, and release them from
        # there.  That way, if .close() gets called more than once, only the
        # first call will release the resources, while subsequent ones will
        # no-op.
        self.resources.pop_all().close()

    def __enter__(self):
        return self

    def __exit__(self, *exception):
        self.close()
        # Don't suppress any exceptions.
        return False

    def __del__(self):
        self.close()

    def __iter__(self):
        return self

    def _pop(self):
        step = self._next.popleft()
        # step could be a partial or a method.
        name = getattr(step, 'func', step).__name__
        log.debug('-> [{:2}] {}'.format(self._debug_step, name))
        return step, name

    def __next__(self):
        try:
            step, name = self._pop()
            step()
            self._debug_step += 1
        except IndexError:
            # Do not chain the exception.
            self.close()
            raise StopIteration from None
        except:
            log.exception('uncaught exception in state machine')
            self.close()
            raise

    def run_thru(self, stop_after):
        """Partially run the state machine.

        Note that any resources maintained by this state machine are
        *not* automatically cleaned up when .run_thru() completes,
        unless an exception occurs, because execution can be continued.
        Call .close() explicitly to release the resources.

        :param stop_after: Name of method to run the state machine
            through.  In other words, the state machine runs until the
            named method completes.
        """
        while True:
            try:
                step, name = self._pop()
            except (StopIteration, IndexError):
                # We're done.
                break
            try:
                step()
            except:
                self.close()
                raise
            self._debug_step += 1
            if name == stop_after:
                break

    def run_until(self, stop_before):
        """Partially run the state machine.

        Note that any resources maintained by this state machine are
        *not* automatically cleaned up when .run_until() completes,
        unless an exception occurs, because execution can be continued.
        Call .close() explicitly to release the resources.

        :param stop_before: Name of the method to run the state machine up
            to.  The state machine runs until the named method is reached;
            unlike `run_thru()`, that method is not run.
        """
        while True:
            try:
                step, name = self._pop()
            except (StopIteration, IndexError):
                # We're done.
                break
            if name == stop_before:
                # Stop executing, but not before we push the last state back
                # onto the deque.  Otherwise, resuming the state machine would
                # skip this step.
                self._next.appendleft(step)
                break
            try:
                step()
            except:
                self.close()
                raise
            self._debug_step += 1