Example #1
def _ensure_urlparser_initialized():
    """
    Ensure that the appropriate clobbering of variables in the
    urlparser module has been done. In the future, the need for this
    clobbering should be eliminated entirely.
    """
    def init():
        global _urlparser_initialized

        if not _urlparser_initialized:
            # These URL schemes have a backend with a notion of an RFC "network location".
            # The 'file' and 's3+http' schemes should not be in this list.
            # 'http' and 'https' are not actually used for duplicity backend URLs, but are
            # needed to properly support URLs returned from some WebDAV servers. Adding them
            # here is a hack; we should instead not stomp on the URL parsing module to begin with.
            #
            # TODO: eliminate the need for backend-specific hacking here completely.
            urlparser.uses_netloc = [
                'ftp', 'ftps', 'hsi', 'rsync', 's3', 'u1', 'scp', 'ssh',
                'sftp', 'webdav', 'webdavs', 'gdocs', 'http', 'https', 'imap',
                'imaps'
            ]

            # Do not transform or otherwise parse the URL path component.
            urlparser.uses_query = []
            urlparser.uses_fragm = []

            _urlparser_initialized = True

    dup_threading.with_lock(_urlparser_initialized_lock, init)
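
# The function above is a lock-guarded, run-once initializer:
# dup_threading.with_lock presumably acquires _urlparser_initialized_lock,
# calls init(), and releases the lock, so concurrent callers cannot race on
# _urlparser_initialized. A minimal self-contained sketch of the same
# pattern (names here are illustrative, not duplicity's API):
import threading

_initialized = False
_initialized_lock = threading.Lock()

def ensure_initialized(do_init):
    """Run do_init() exactly once, even with concurrent callers."""
    global _initialized
    with _initialized_lock:
        if not _initialized:
            do_init()
            _initialized = True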
    # Worker-thread body (presumably nested inside __start_worker, which is
    # called at the end of __run_asynchronously in Example #7): run the
    # caller, then decrement the worker count and wake any blocked threads.
    def trampoline():
        try:
            self.__execute_caller(caller)
        finally:
            def complete_worker():
                self.__worker_count -= 1
                log.Debug("%s: %s" % (self.__class__.__name__,
                                      _("active workers = %d") % (self.__worker_count,)))
                self.__cv.notifyAll()
            with_lock(self.__cv, complete_worker)
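
# These fragments rely on a with_lock(lock, fn) helper whose implementation
# is not shown in this listing. A minimal sketch consistent with how it is
# used here (an assumption, not duplicity's actual code):
def with_lock(lock, fn):
    """Call fn() while holding lock; works for Lock and Condition objects."""
    lock.acquire()
    try:
        return fn()
    finally:
        lock.release()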
    def wait(self):
        """
        Wait for the scheduler to become entirely empty (i.e., all
        tasks having run to completion).

        IMPORTANT: This is only useful with a single caller scheduling
        tasks, such that no call to schedule_task() is currently in
        progress or may happen subsequently to the call to wait().
        """
        def _wait():
            interruptably_wait(self.__cv, lambda: self.__worker_count == 0 and self.__waiter_count == 0)

        with_lock(self.__cv, _wait)
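
# interruptably_wait is likewise not shown. Judging by the call site above,
# it blocks on the condition until the predicate holds. A plausible sketch
# (an assumption): wake periodically so that signals such as
# KeyboardInterrupt can still be delivered while blocked.
def interruptably_wait(cv, waiter):
    """Block until waiter() is true. Assumes cv is already held by the
    caller, as in wait() above."""
    while not waiter():
        cv.wait(1.0)  # the timeout makes the wait interruptible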
    def __execute_caller(self, caller):
        # The caller half that we get here will not propagate
        # errors back to us, but rather propagate them back to the
        # "other half" of the async split.
        succeeded, waiter = caller()
        if not succeeded:
            def _signal_failed():
                if not self.__failed:
                    self.__failed = True
                    self.__failed_waiter = waiter
                    self.__cv.notifyAll()
            with_lock(self.__cv, _signal_failed)

        log.Info("%s: %s" % (self.__class__.__name__,
                             _("task execution done (success: %s)") % succeeded),
                 log.InfoCode.asynchronous_upload_done)
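
# Note: failure is recorded here rather than raised. The first failed
# task's waiter is stashed in self.__failed_waiter, and
# check_pending_failure() (see Example #7) later invokes it on the
# scheduling thread so the exception surfaces there.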
Example #5
def _ensure_urlparser_initialized():
    """
    Ensure that the appropriate clobbering of variables in the
    urlparser module has been done. In the future, the need for this
    clobbering should be eliminated entirely.
    """

    def init():
        global _urlparser_initialized

        if not _urlparser_initialized:
            # These URL schemes have a backend with a notion of an RFC "network location".
            # The 'file' and 's3+http' schemes should not be in this list.
            # 'http' and 'https' are not actually used for duplicity backend URLs, but are
            # needed to properly support URLs returned from some WebDAV servers. Adding them
            # here is a hack; we should instead not stomp on the URL parsing module to begin with.
            #
            # TODO: eliminate the need for backend-specific hacking here completely.
            urlparser.uses_netloc = [
                "ftp",
                "ftps",
                "hsi",
                "rsync",
                "s3",
                "u1",
                "scp",
                "ssh",
                "sftp",
                "webdav",
                "webdavs",
                "gdocs",
                "http",
                "https",
                "imap",
                "imaps",
                "mega",
            ]

            # Do not transform or otherwise parse the URL path component.
            urlparser.uses_query = []
            urlparser.uses_fragm = []

            _urlparser_initialized = True

    dup_threading.with_lock(_urlparser_initialized_lock, init)
    def insert_barrier(self):
        """
        Proclaim that any tasks scheduled prior to the call to this
        method MUST be executed prior to any tasks scheduled after the
        call to this method.

        The intended use case is that if task B depends on A, a
        barrier must be inserted in between to guarantee that A
        happens before B.
        """
        log.Debug("%s: %s" % (self.__class__.__name__, _("inserting barrier")))
        # With concurrency 0 this is a no-op, and due to the special case in
        # task scheduling we do not want to append to the queue (it would
        # never be popped).
        if self.__concurrency > 0:
            def _insert_barrier():
                self.__barrier = True

            with_lock(self.__cv, _insert_barrier)
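
# A hypothetical usage sketch of the barrier; the scheduler construction
# and task functions below are illustrative assumptions, not duplicity's
# actual API:
scheduler = AsyncScheduler(concurrency=4)
scheduler.schedule_task(upload_volume, (volume,))       # task A
scheduler.insert_barrier()     # A must finish before anything scheduled below
scheduler.schedule_task(delete_local_copy, (volume,))   # task B
scheduler.wait()               # block until all tasks have completed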
Example #7
    def __run_asynchronously(self, fn, params):
        (waiter, caller) = async_split(lambda: fn(*params))

        def check_pending_failure():
            if self.__failed:
                log.Info(
                    "%s: %s" % (self.__class__.__name__,
                                _("a previously scheduled task has failed; "
                                  "propagating the result immediately")),
                    log.InfoCode.asynchronous_upload_done)
                self.__failed_waiter()
                raise AssertionError(
                    "%s: waiter should have raised an exception; "
                    "this is a bug" % (self.__class__.__name__, ))

        def wait_for_and_register_launch():
            check_pending_failure()  # raise on fail
            while self.__worker_count >= self.__concurrency or self.__barrier:
                if self.__worker_count == 0:
                    assert self.__barrier, "barrier should be in effect"
                    self.__barrier = False
                    self.__cv.notifyAll()
                else:
                    self.__waiter_count += 1
                    self.__cv.wait()
                    self.__waiter_count -= 1

                check_pending_failure()  # raise on fail

            self.__worker_count += 1
            log.Debug("%s: %s" %
                      (self.__class__.__name__, _("active workers = %d") %
                       (self.__worker_count, )))

        # Simply wait for an OK condition to start, then launch our worker. Workers
        # never wait on us; we only wait for them.
        with_lock(self.__cv, wait_for_and_register_launch)

        self.__start_worker(caller)

        return waiter
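
# async_split is not shown in this listing. From its use here, it splits a
# computation into a caller (runs on the worker and returns a success flag
# plus the waiter) and a waiter (blocks until the result is ready and
# re-raises any exception on the waiting thread). A minimal sketch under
# those assumptions:
import threading

def async_split(fn):
    cv = threading.Condition()
    state = {"done": False, "error": None, "value": None}

    def waiter():
        # Block until the caller has finished, then surface the outcome.
        with cv:
            while not state["done"]:
                cv.wait()
        if state["error"] is not None:
            raise state["error"]
        return state["value"]

    def caller():
        # Run fn, record the outcome, and wake the waiter.
        succeeded = True
        try:
            state["value"] = fn()
        except Exception as e:
            state["error"] = e
            succeeded = False
        with cv:
            state["done"] = True
            cv.notify_all()
        return succeeded, waiter

    return waiter, caller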