Code example #1
    def __init__(self, engine, dao, max_file_processors=5):
        '''
        Constructor
        '''
        super(QueueManager, self).__init__()
        self._dao = dao
        self._engine = engine
        self._local_folder_queue = Queue()
        self._local_file_queue = Queue()
        self._remote_file_queue = Queue()
        self._remote_folder_queue = Queue()
        self._connected = local()
        self._local_folder_enable = True
        self._local_file_enable = True
        self._remote_folder_enable = True
        self._remote_file_enable = True
        self._local_folder_thread = None
        self._local_file_thread = None
        self._remote_folder_thread = None
        self._remote_file_thread = None
        self._error_threshold = ERROR_THRESHOLD
        self._error_interval = 60
        self.set_max_processors(max_file_processors)
        self._threads_pool = list()
        self._processors_pool = list()
        self._get_file_lock = Lock()
        # Should not operate on threads while we are inspecting them
        '''
        This error required adding a lock around thread inspection: the traceback below shows the processor thread being ended while the method was still running
        Traceback (most recent call last):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 845, in handle_watchdog_event
             self.scan_pair(rel_path)
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 271, in scan_pair
             self._suspend_queue()
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 265, in _suspend_queue
             for processor in self._engine.get_queue_manager().get_processors_on('/', exact_match=False):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/queue_manager.py", line 413, in get_processors_on
             res.append(self._local_file_thread.worker)
         AttributeError: 'NoneType' object has no attribute 'worker'
        '''
        self._thread_inspection = Lock()

        # ERROR HANDLING
        self._error_lock = Lock()
        self._on_error_queue = BlacklistQueue(delay=DEFAULT_DELAY)
        self._error_timer = QTimer()
        # TODO newErrorGiveUp signal is not connected
        self._error_timer.timeout.connect(self._on_error_timer)
        self.newError.connect(self._on_new_error)
        self.queueProcessing.connect(self.launch_processors)
        # LAST ACTION
        self._dao.register_queue_manager(self)
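
In this variant, max_file_processors is a single integer (default 5). The matching scalar set_max_processors appears in code example #4 below: the value is clamped to at least 2 and the surplus becomes the number of additional "GenericProcessor" threads, presumably because the dedicated local and remote file processors already occupy the first two slots. A minimal stand-alone sketch of that sizing rule (additional_generic_processors is a hypothetical helper name, not part of QueueManager):

# Minimal sketch, assuming the scalar set_max_processors shown in code example #4;
# additional_generic_processors is a hypothetical name, not part of QueueManager.
def additional_generic_processors(max_file_processors):
    if max_file_processors < 2:
        max_file_processors = 2
    return max_file_processors - 2

assert additional_generic_processors(5) == 3  # default: 3 extra generic workers
assert additional_generic_processors(1) == 0  # clamped: no extra workers
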
Code example #2
File: queue_manager.py Project: mkeshava/nuxeo-drive
    def __init__(self, engine, dao, max_file_processors=(0, 0, 5)):
        '''
        Constructor
        '''
        super(QueueManager, self).__init__()
        self._dao = dao
        self._engine = engine
        self._local_folder_queue = Queue()
        self._local_file_queue = Queue()
        self._remote_file_queue = Queue()
        self._remote_folder_queue = Queue()
        self._connected = local()
        self._local_folder_enable = True
        self._local_file_enable = True
        self._remote_folder_enable = True
        self._remote_file_enable = True
        self._local_folder_thread = None
        self._local_file_thread = None
        self._remote_folder_thread = None
        self._remote_file_thread = None
        self._error_threshold = ERROR_THRESHOLD
        self._error_interval = 60
        self._max_local_processors = 0
        self._max_remote_processors = 0
        self._max_generic_processors = 0
        self.set_max_processors(max_file_processors)
        self._threads_pool = list()
        self._processors_pool = list()
        self._get_file_lock = Lock()
        # Should not operate on threads while we are inspecting them
        '''
        This error required adding a lock around thread inspection: the traceback below shows the processor thread being ended while the method was still running
        Traceback (most recent call last):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 845, in handle_watchdog_event
             self.scan_pair(rel_path)
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 271, in scan_pair
             self._suspend_queue()
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 265, in _suspend_queue
             for processor in self._engine.get_queue_manager().get_processors_on('/', exact_match=False):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/queue_manager.py", line 413, in get_processors_on
             res.append(self._local_file_thread.worker)
         AttributeError: 'NoneType' object has no attribute 'worker'
        '''
        self._thread_inspection = Lock()

        # ERROR HANDLING
        self._error_lock = Lock()
        self._on_error_queue = BlacklistQueue(delay=DEFAULT_DELAY)
        self._error_timer = QTimer()
        # TODO newErrorGiveUp signal is not connected
        self._error_timer.timeout.connect(self._on_error_timer)
        self.newError.connect(self._on_new_error)
        self.queueProcessing.connect(self.launch_processors)
        # LAST ACTION
        self._dao.register_queue_manager(self)
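
In this variant, max_file_processors is a (local, remote, generic) tuple. The matching set_max_processors appears in code example #3 below: each element is clamped to a minimum of (1, 1, 2), and the stored values count the additional pool processors on top of the dedicated ones. A minimal stand-alone sketch of that sizing rule (additional_processors is a hypothetical helper name):

# Minimal sketch, assuming the tuple set_max_processors shown in code example #3;
# additional_processors is a hypothetical name, not part of QueueManager.
def additional_processors(max_file_processors):
    max_local, max_remote, max_generic = max_file_processors
    max_local = max(max_local, 1)
    max_remote = max(max_remote, 1)
    max_generic = max(max_generic, 2)
    return max_local - 1, max_remote - 1, max_generic - 2

assert additional_processors((0, 0, 5)) == (0, 0, 3)  # default: 3 extra generic workers
assert additional_processors((2, 3, 1)) == (1, 2, 0)  # generic clamped up to 2
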
Code example #3
File: queue_manager.py Project: mkeshava/nuxeo-drive
class QueueManager(QObject):
    # Always create thread from the main thread
    newItem = pyqtSignal(object)
    newError = pyqtSignal(object)
    newErrorGiveUp = pyqtSignal(object)
    queueEmpty = pyqtSignal()
    queueProcessing = pyqtSignal()
    queueFinishedProcessing = pyqtSignal()
    # Only used by Unit Test
    _disable = False
    '''
    classdocs
    '''

    def __init__(self, engine, dao, max_file_processors=(0, 0, 5)):
        '''
        Constructor
        '''
        super(QueueManager, self).__init__()
        self._dao = dao
        self._engine = engine
        self._local_folder_queue = Queue()
        self._local_file_queue = Queue()
        self._remote_file_queue = Queue()
        self._remote_folder_queue = Queue()
        self._connected = local()
        self._local_folder_enable = True
        self._local_file_enable = True
        self._remote_folder_enable = True
        self._remote_file_enable = True
        self._local_folder_thread = None
        self._local_file_thread = None
        self._remote_folder_thread = None
        self._remote_file_thread = None
        self._error_threshold = ERROR_THRESHOLD
        self._error_interval = 60
        self._max_local_processors = 0
        self._max_remote_processors = 0
        self._max_generic_processors = 0
        self.set_max_processors(max_file_processors)
        self._threads_pool = list()
        self._processors_pool = list()
        self._get_file_lock = Lock()
        # Should not operate on threads while we are inspecting them
        '''
        This error required adding a lock around thread inspection: the traceback below shows the processor thread being ended while the method was still running
        Traceback (most recent call last):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 845, in handle_watchdog_event
             self.scan_pair(rel_path)
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 271, in scan_pair
             self._suspend_queue()
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 265, in _suspend_queue
             for processor in self._engine.get_queue_manager().get_processors_on('/', exact_match=False):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/queue_manager.py", line 413, in get_processors_on
             res.append(self._local_file_thread.worker)
         AttributeError: 'NoneType' object has no attribute 'worker'
        '''
        self._thread_inspection = Lock()

        # ERROR HANDLING
        self._error_lock = Lock()
        self._on_error_queue = BlacklistQueue(delay=DEFAULT_DELAY)
        self._error_timer = QTimer()
        # TODO newErrorGiveUp signal is not connected
        self._error_timer.timeout.connect(self._on_error_timer)
        self.newError.connect(self._on_new_error)
        self.queueProcessing.connect(self.launch_processors)
        # LAST ACTION
        self._dao.register_queue_manager(self)

    def init_processors(self):
        log.trace("Init processors")
        self.newItem.connect(self.launch_processors)
        self.queueProcessing.emit()

    def shutdown_processors(self):
        log.trace("Shutdown processors")
        try:
            self.newItem.disconnect(self.launch_processors)
        except TypeError:
            # TypeError: disconnect() failed between 'newItem' and 'launch_processors'
            pass

    def init_queue(self, queue):
        # No need to convert the items: State is compatible with QueueItem
        for item in queue:
            self.push(item)

    def _copy_queue(self, queue):
        result = deepcopy(queue.queue)
        result.reverse()
        return result

    def set_max_processors(self, max_file_processors):
        max_local_processors = max_file_processors[0]
        max_remote_processors = max_file_processors[1]
        max_generic_processors = max_file_processors[2]
        if max_local_processors < 1:
            max_local_processors = 1
        if max_remote_processors < 1:
            max_remote_processors = 1
        if max_generic_processors < 2:
            max_generic_processors = 2
        self._max_local_processors = max_local_processors - 1
        self._max_remote_processors = max_remote_processors - 1
        self._max_generic_processors = max_generic_processors - 2
        log.trace('number of additional processors: %d local, %d remote, %d generic',
                  self._max_local_processors, self._max_remote_processors, self._max_generic_processors)

    def resume(self):
        log.debug("Resuming queue")
        self.enable_local_file_queue(True, False)
        self.enable_local_folder_queue(True, False)
        self.enable_remote_file_queue(True, False)
        self.enable_remote_folder_queue(True, False)
        self.queueProcessing.emit()

    def is_paused(self):
        return (not self._local_file_enable or
                not self._local_folder_enable or
                not self._remote_file_enable or
                not self._remote_folder_enable)

    def suspend(self):
        log.debug("Suspending queue")
        self.enable_local_file_queue(False)
        self.enable_local_folder_queue(False)
        self.enable_remote_file_queue(False)
        self.enable_remote_folder_queue(False)

    def restart(self, num_processors=None, wait=False):
        if num_processors is not None:
            assert sum(num_processors) <= MAX_NUMBER_PROCESSORS, \
                'total number of additional processors must be %d or less' % MAX_NUMBER_PROCESSORS
        # stop new items from being processed
        self.shutdown_processors()
        # attempt to stop current processors
        if self._local_file_thread is not None:
            self._local_file_thread.worker.stop()
        if self._local_folder_thread is not None:
            self._local_folder_thread.worker.stop()
        if self._remote_file_thread is not None:
            self._remote_file_thread.worker.stop()
        if self._remote_folder_thread is not None:
            self._remote_folder_thread.worker.stop()
        for p in self._processors_pool:
            p.worker.stop()
        if wait:
            while self.is_active():
                QCoreApplication.processEvents()
                sleep(0.1)
        self.set_max_processors(num_processors)
        # re-enable new items to trigger processing
        self.init_processors()
        # launch processors for new items in the queue, if any
        self.launch_processors()

    def enable_local_file_queue(self, value=True, emit=True):
        self._local_file_enable = value
        if self._local_file_thread is not None and not value:
            self._local_file_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_local_folder_queue(self, value=True, emit=True):
        self._local_folder_enable = value
        if self._local_folder_thread is not None and not value:
            self._local_folder_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_remote_file_queue(self, value=True, emit=True):
        self._remote_file_enable = value
        if self._remote_file_thread is not None and not value:
            self._remote_file_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_remote_folder_queue(self, value=True, emit=True):
        self._remote_folder_enable = value
        if self._remote_folder_thread is not None and not value:
            self._remote_folder_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def get_local_file_queue(self):
        return self._copy_queue(self._local_file_queue)

    def get_remote_file_queue(self):
        return self._copy_queue(self._remote_file_queue)

    def get_local_folder_queue(self):
        return self._copy_queue(self._local_folder_queue)

    def get_remote_folder_queue(self):
        return self._copy_queue(self._remote_folder_queue)

    def push_ref(self, row_id, folderish, pair_state):
        self.push(QueueItem(row_id, folderish, pair_state))

    def push(self, state):
        if state.pair_state is None:
            log.trace("Don't push an empty pair_state: %r", state)
            return
        log.trace("Pushing %r", state)
        row_id = state.id
        if state.pair_state.startswith('locally'):
            if state.folderish:
                self._local_folder_queue.put(state)
                log.trace('Pushed to _local_folder_queue, now of size: %d', self._local_folder_queue.qsize())
            else:
                if "deleted" in state.pair_state:
                    self._engine.cancel_action_on(state.id)
                self._local_file_queue.put(state)
                log.trace('Pushed to _local_file_queue, now of size: %d', self._local_file_queue.qsize())
            self.newItem.emit(row_id)
        elif state.pair_state.startswith('remotely'):
            if state.folderish:
                self._remote_folder_queue.put(state)
                log.trace('Pushed to _remote_folder_queue, now of size: %d', self._remote_folder_queue.qsize())
            else:
                if "deleted" in state.pair_state:
                    self._engine.cancel_action_on(state.id)
                self._remote_file_queue.put(state)
                log.trace('Pushed to _remote_file_queue, now of size: %d', self._remote_file_queue.qsize())
            self.newItem.emit(row_id)
        else:
            # deleted and conflicted
            log.debug("Not processable state: %r", state)

    @pyqtSlot()
    def _on_error_timer(self):
        for item in self._on_error_queue.process_items():
            doc_pair = item.get()
            queueItem = QueueItem(doc_pair.id, doc_pair.folderish, doc_pair.pair_state)
            log.debug('Retrying blacklisted doc_pair: %r', doc_pair)
            self.push(queueItem)

        if self._on_error_queue.is_empty():
            self._error_timer.stop()
            log.debug('blacklist queue timer stopped')

    def _is_on_error(self, row_id):
        return self._on_error_queue.exists(row_id)

    @pyqtSlot()
    def _on_new_error(self):
        self._error_timer.start(1000)
        log.debug('blacklist queue timer started')

    def get_errors_count(self):
        return self._on_error_queue.size()

    def get_error_threshold(self):
        return self._error_threshold

    def push_error(self, doc_pair, exception=None):
        error_count = doc_pair.error_count
        if (exception is not None and type(exception) == WindowsError
            and hasattr(exception, 'winerror') and exception.winerror == WINERROR_CODE_PROCESS_CANNOT_ACCESS_FILE):
            log.debug("Detected WindowsError with code %d: '%s', won't increase next try interval",
                      WINERROR_CODE_PROCESS_CANNOT_ACCESS_FILE,
                      exception.strerror if hasattr(exception, 'strerror') else '')
            error_count = 1
        if error_count > self._error_threshold:
            self._on_error_queue.remove(doc_pair.id)
            # TODO this signal is not connected
            self.newErrorGiveUp.emit(doc_pair.id)
            log.debug("Giving up on pair : %r", doc_pair)
            return

        interval = self._on_error_queue.push(doc_pair.id, doc_pair, count=doc_pair.error_count)
        log.debug("Blacklisting pair for %ds (error count=%d): %r", interval, doc_pair.error_count, doc_pair)
        if not self._error_timer.isActive():
            self.newError.emit(doc_pair.id)

    def requeue_errors(self):
        for doc_pair in self._on_error_queue.items():
            doc_pair.error_next_try = 0

    def _get_local_folder(self):
        if self._local_folder_queue.empty():
            return None
        try:
            state = self._local_folder_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_local_folder()
        return state

    def _get_local_file(self):
        if self._local_file_queue.empty():
            return None
        try:
            state = self._local_file_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_local_file()
        return state

    def _get_remote_folder(self):
        if self._remote_folder_queue.empty():
            return None
        try:
            state = self._remote_folder_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_remote_folder()
        return state

    def _get_remote_file(self):
        if self._remote_file_queue.empty():
            return None
        try:
            state = self._remote_file_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_remote_file()
        return state

    def _get_file(self):
        self._get_file_lock.acquire()
        if self._remote_file_queue.empty() and self._local_file_queue.empty():
            self._get_file_lock.release()
            return None
        state = None
        if (self._remote_file_queue.qsize() > self._local_file_queue.qsize()):
            state = self._get_remote_file()
        else:
            state = self._get_local_file()
        self._get_file_lock.release()
        if state is not None and self._is_on_error(state.id):
            return self._get_file()
        return state

    @pyqtSlot()
    def _thread_finished(self):
        self._thread_inspection.acquire()
        try:
            for thread in self._processors_pool:
                if thread.isFinished():
                    self._processors_pool.remove(thread)
                    QueueManager.clear_client_transfer_stats(thread.worker.get_thread_id())
            if (self._local_folder_thread is not None and
                    self._local_folder_thread.isFinished()):
                self._local_folder_thread = None
            if (self._local_file_thread is not None and
                    self._local_file_thread.isFinished()):
                self._local_file_thread = None
            if (self._remote_folder_thread is not None and
                    self._remote_folder_thread.isFinished()):
                self._remote_folder_thread = None
            if (self._remote_file_thread is not None and
                    self._remote_file_thread.isFinished()):
                self._remote_file_thread = None
            if not self._engine.is_paused() and not self._engine.is_stopped():
                self.newItem.emit(None)
        finally:
            self._thread_inspection.release()

    def active(self):
        # Recheck threads
        self._thread_finished()
        return self.is_active()

    def is_active(self):
        return (self._local_folder_thread is not None
                or self._local_file_thread is not None
                or self._remote_file_thread is not None
                or self._remote_folder_thread is not None
                or len(self._processors_pool) > 0)

    def _create_thread(self, item_getter, name=None):
        processor = self._engine.create_processor(item_getter, name=name)
        thread = self._engine.create_thread(worker=processor)
        thread.finished.connect(self._thread_finished)
        thread.terminated.connect(self._thread_finished)
        thread.start()
        return thread

    def get_metrics(self):
        metrics = dict()
        metrics["local_folder_queue"] = self._local_folder_queue.qsize()
        metrics["local_file_queue"] = self._local_file_queue.qsize()
        metrics["remote_folder_queue"] = self._remote_folder_queue.qsize()
        metrics["remote_file_queue"] = self._remote_file_queue.qsize()
        metrics["remote_file_thread"] = self._remote_file_thread is not None
        metrics["remote_folder_thread"] = self._remote_folder_thread is not None
        metrics["local_file_thread"] = self._local_file_thread is not None
        metrics["local_folder_thread"] = self._local_folder_thread is not None
        metrics["error_queue"] = self.get_errors_count()
        metrics["total_queue"] = (metrics["local_folder_queue"] + metrics["local_file_queue"]
                                  + metrics["remote_folder_queue"] + metrics["remote_file_queue"])
        metrics["additional_processors"] = len(self._processors_pool)
        return metrics

    def get_overall_size(self):
        return (self._local_folder_queue.qsize() + self._local_file_queue.qsize()
                + self._remote_folder_queue.qsize() + self._remote_file_queue.qsize())

    def is_processing_file(self, worker, path, exact_match=False):
        if not hasattr(worker, "_current_doc_pair"):
            return False
        doc_pair = worker._current_doc_pair
        if (doc_pair is None or doc_pair.local_path is None):
            return False
        if exact_match:
            result = doc_pair.local_path == path
        else:
            result = doc_pair.local_path.startswith(path)
        if result:
            log.trace("Worker(%r) is processing: %r", worker.get_metrics(), path)
        return result

    def interrupt_processors_on(self, path, exact_match=True):
        for proc in self.get_processors_on(path, exact_match):
            proc.stop()

    def get_processors_on(self, path, exact_match=True):
        self._thread_inspection.acquire()
        try:
            res = []
            if self._local_folder_thread is not None:
                if self.is_processing_file(self._local_folder_thread.worker, path, exact_match):
                    res.append(self._local_folder_thread.worker)
            if self._remote_folder_thread is not None:
                if self.is_processing_file(self._remote_folder_thread.worker, path, exact_match):
                    res.append(self._remote_folder_thread.worker)
            if self._local_file_thread is not None:
                if self.is_processing_file(self._local_file_thread.worker, path, exact_match):
                    res.append(self._local_file_thread.worker)
            if self._remote_file_thread is not None:
                if self.is_processing_file(self._remote_file_thread.worker, path, exact_match):
                    res.append(self._remote_file_thread.worker)
            for thread in self._processors_pool:
                if self.is_processing_file(thread.worker, path, exact_match):
                    res.append(thread.worker)
            return res
        finally:
            self._thread_inspection.release()

    def has_file_processors_on(self, path):
        self._thread_inspection.acquire()
        try:
            # First check local and remote file
            if self._local_file_thread is not None:
                if self.is_processing_file(self._local_file_thread.worker, path):
                    return True
            if self._remote_file_thread is not None:
                if self.is_processing_file(self._remote_file_thread.worker, path):
                    return True
            for thread in self._processors_pool:
                if self.is_processing_file(thread.worker, path):
                    return True
            return False
        finally:
            self._thread_inspection.release()

    @pyqtSlot()
    def launch_processors(self):
        if (self._disable or self.is_paused() or (self._local_folder_queue.empty() and self._local_file_queue.empty()
                                                  and self._remote_folder_queue.empty() and self._remote_file_queue.empty())):
            self.queueEmpty.emit()
            if not self.is_active():
                self.queueFinishedProcessing.emit()
            return

        log.trace("Launching processors")
        if not (self._local_folder_queue.empty() and self._local_file_queue.empty()):
            if self._local_folder_thread is None and not self._local_folder_queue.empty() and self._local_folder_enable:
                log.debug("creating local folder processor")
                self._local_folder_thread = self._create_thread(self._get_local_folder, name="LocalFolderProcessor")
            if self._local_file_thread is None and not self._local_file_queue.empty() and self._local_file_enable:
                log.debug("creating local file processor")
                self._local_file_thread = self._create_thread(self._get_local_file, name="LocalFileProcessor")

        if not (self._remote_folder_queue.empty() and self._remote_file_queue.empty()):
            if self._remote_folder_thread is None and not self._remote_folder_queue.empty() and self._remote_folder_enable:
                log.debug("creating remote folder processor")
                self._remote_folder_thread = self._create_thread(self._get_remote_folder, name="RemoteFolderProcessor")
            if self._remote_file_thread is None and not self._remote_file_queue.empty() and self._remote_file_enable:
                log.debug("creating remote file processor")
                self._remote_file_thread = self._create_thread(self._get_remote_file, name="RemoteFileProcessor")
        if self._remote_file_queue.qsize() + self._local_file_queue.qsize() == 0:
            return

        count = 0
        # log.trace('processor pool: %s', ','.join([t.worker.get_name() for t in self._processors_pool]))
        log.trace('max generic processors: %d', self._max_generic_processors)
        log.trace('max remote processors: %d', self._max_remote_processors)
        log.trace('max local processors: %d', self._max_local_processors)

        if not (self._local_file_queue.empty() and self._remote_file_queue.empty()):
            while len([t for t in self._processors_pool if t.worker.get_name() == "GenericProcessor"]) < self._max_generic_processors:
                self._processors_pool.append(self._create_thread(self._get_file, name="GenericProcessor"))
                count += 1
        if count > 0:
            log.trace("created %d additional file processor%s", count, 's' if count > 1 else '')
        count = 0
        if not self._remote_file_queue.empty():
            while len([t for t in self._processors_pool if t.worker.get_name() == "RemoteFileProcessor"]) < self._max_remote_processors:
                self._processors_pool.append(self._create_thread(self._get_remote_file, name="RemoteFileProcessor"))
                count += 1
        if count > 0:
            log.trace("created %d additional remote file processor%s", count, 's' if count > 1 else '')
        count = 0
        if not self._local_file_queue.empty():
            while len([t for t in self._processors_pool if t.worker.get_name() == "LocalFileProcessor"]) < self._max_local_processors:
                self._processors_pool.append(self._create_thread(self._get_local_file, name="LocalFileProcessor"))
                count += 1
        if count > 0:
            log.trace("created %d additional local file processor%s", count, 's' if count > 1 else '')

    @staticmethod
    def clear_client_transfer_stats(thread_id):
        if hasattr(BaseAutomationClient, 'download_stats') and BaseAutomationClient.download_stats is not None:
            BaseAutomationClient.download_stats.clear(thread_id)
        if hasattr(BaseAutomationClient, 'upload_stats') and BaseAutomationClient.upload_stats is not None:
            BaseAutomationClient.upload_stats.clear(thread_id)
        if hasattr(BaseAutomationClient, 'download_token_bucket') and \
                   BaseAutomationClient.download_token_bucket is not None:
            BaseAutomationClient.download_token_bucket.clear(thread_id)
        if hasattr(BaseAutomationClient, 'upload_token_bucket') and \
                   BaseAutomationClient.upload_token_bucket is not None:
            BaseAutomationClient.upload_token_bucket.clear(thread_id)
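
push() above routes each state by the prefix of its pair_state and by the folderish flag: 'locally*' items go to the local queues, 'remotely*' items to the remote ones, with folders and files kept in separate queues, while anything else (e.g. deleted or conflicted states) is only logged. A minimal stand-alone sketch of that routing decision (route is a hypothetical helper name):

# Minimal sketch of the routing decision inside push(); route is a hypothetical
# helper name and the returned strings only label the four internal queues.
def route(pair_state, folderish):
    if pair_state is None:
        return None
    if pair_state.startswith('locally'):
        return 'local_folder_queue' if folderish else 'local_file_queue'
    if pair_state.startswith('remotely'):
        return 'remote_folder_queue' if folderish else 'remote_file_queue'
    return None  # e.g. deleted or conflicted: not processable here

assert route('locally_created', folderish=False) == 'local_file_queue'
assert route('remotely_modified', folderish=True) == 'remote_folder_queue'
assert route('conflicted', folderish=False) is None
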
Code example #4
class QueueManager(QObject):
    # Always create thread from the main thread
    newItem = pyqtSignal(object)
    newError = pyqtSignal(object)
    newErrorGiveUp = pyqtSignal(object)
    queueEmpty = pyqtSignal()
    queueProcessing = pyqtSignal()
    queueFinishedProcessing = pyqtSignal()
    # Only used by Unit Test
    _disable = False
    '''
    classdocs
    '''
    def __init__(self, engine, dao, max_file_processors=5):
        '''
        Constructor
        '''
        super(QueueManager, self).__init__()
        self._dao = dao
        self._engine = engine
        self._local_folder_queue = Queue()
        self._local_file_queue = Queue()
        self._remote_file_queue = Queue()
        self._remote_folder_queue = Queue()
        self._connected = local()
        self._local_folder_enable = True
        self._local_file_enable = True
        self._remote_folder_enable = True
        self._remote_file_enable = True
        self._local_folder_thread = None
        self._local_file_thread = None
        self._remote_folder_thread = None
        self._remote_file_thread = None
        self._error_threshold = ERROR_THRESHOLD
        self._error_interval = 60
        self.set_max_processors(max_file_processors)
        self._threads_pool = list()
        self._processors_pool = list()
        self._get_file_lock = Lock()
        # Should not operate on threads while we are inspecting them
        '''
        This error required adding a lock around thread inspection: the traceback below shows the processor thread being ended while the method was still running
        Traceback (most recent call last):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 845, in handle_watchdog_event
             self.scan_pair(rel_path)
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 271, in scan_pair
             self._suspend_queue()
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/watcher/local_watcher.py", line 265, in _suspend_queue
             for processor in self._engine.get_queue_manager().get_processors_on('/', exact_match=False):
           File "/Users/hudson/tmp/workspace/FT-nuxeo-drive-master-osx/nuxeo-drive-client/nxdrive/engine/queue_manager.py", line 413, in get_processors_on
             res.append(self._local_file_thread.worker)
         AttributeError: 'NoneType' object has no attribute 'worker'
        '''
        self._thread_inspection = Lock()

        # ERROR HANDLING
        self._error_lock = Lock()
        self._on_error_queue = BlacklistQueue(delay=DEFAULT_DELAY)
        self._error_timer = QTimer()
        # TODO newErrorGiveUp signal is not connected
        self._error_timer.timeout.connect(self._on_error_timer)
        self.newError.connect(self._on_new_error)
        self.queueProcessing.connect(self.launch_processors)
        # LAST ACTION
        self._dao.register_queue_manager(self)

    def init_processors(self):
        log.trace("Init processors")
        self.newItem.connect(self.launch_processors)
        self.queueProcessing.emit()

    def shutdown_processors(self):
        log.trace("Shutdown processors")
        try:
            self.newItem.disconnect(self.launch_processors)
        except TypeError:
            # TypeError: disconnect() failed between 'newItem' and 'launch_processors'
            pass

    def init_queue(self, queue):
        # No need to convert the items: State is compatible with QueueItem
        for item in queue:
            self.push(item)

    def _copy_queue(self, queue):
        result = deepcopy(queue.queue)
        result.reverse()
        return result

    def set_max_processors(self, max_file_processors):
        if max_file_processors < 2:
            max_file_processors = 2
        self._max_processors = max_file_processors - 2

    def resume(self):
        log.debug("Resuming queue")
        self.enable_local_file_queue(True, False)
        self.enable_local_folder_queue(True, False)
        self.enable_remote_file_queue(True, False)
        self.enable_remote_folder_queue(True, False)
        self.queueProcessing.emit()

    def is_paused(self):
        return (not self._local_file_enable or
                not self._local_folder_enable or
                not self._remote_file_enable or
                not self._remote_folder_enable)

    def suspend(self):
        log.debug("Suspending queue")
        self.enable_local_file_queue(False)
        self.enable_local_folder_queue(False)
        self.enable_remote_file_queue(False)
        self.enable_remote_folder_queue(False)


    def enable_local_file_queue(self, value=True, emit=True):
        self._local_file_enable = value
        if self._local_file_thread is not None and not value:
            self._local_file_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_local_folder_queue(self, value=True, emit=True):
        self._local_folder_enable = value
        if self._local_folder_thread is not None and not value:
            self._local_folder_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_remote_file_queue(self, value=True, emit=True):
        self._remote_file_enable = value
        if self._remote_file_thread is not None and not value:
            self._remote_file_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def enable_remote_folder_queue(self, value=True, emit=True):
        self._remote_folder_enable = value
        if self._remote_folder_thread is not None and not value:
            self._remote_folder_thread.quit()
        if value and emit:
            self.queueProcessing.emit()

    def get_local_file_queue(self):
        return self._copy_queue(self._local_file_queue)

    def get_remote_file_queue(self):
        return self._copy_queue(self._remote_file_queue)

    def get_local_folder_queue(self):
        return self._copy_queue(self._local_folder_queue)

    def get_remote_folder_queue(self):
        return self._copy_queue(self._remote_folder_queue)

    def push_ref(self, row_id, folderish, pair_state):
        self.push(QueueItem(row_id, folderish, pair_state))

    def push(self, state):
        if state.pair_state is None:
            log.trace("Don't push an empty pair_state: %r", state)
            return
        log.trace("Pushing %r", state)
        row_id = state.id
        if state.pair_state.startswith('locally'):
            if state.folderish:
                self._local_folder_queue.put(state)
                log.trace('Pushed to _local_folder_queue, now of size: %d', self._local_folder_queue.qsize())
            else:
                if "deleted" in state.pair_state:
                    self._engine.cancel_action_on(state.id)
                self._local_file_queue.put(state)
                log.trace('Pushed to _local_file_queue, now of size: %d', self._local_file_queue.qsize())
            self.newItem.emit(row_id)
        elif state.pair_state.startswith('remotely'):
            if state.folderish:
                self._remote_folder_queue.put(state)
                log.trace('Pushed to _remote_folder_queue, now of size: %d', self._remote_folder_queue.qsize())
            else:
                if "deleted" in state.pair_state:
                    self._engine.cancel_action_on(state.id)
                self._remote_file_queue.put(state)
                log.trace('Pushed to _remote_file_queue, now of size: %d', self._remote_file_queue.qsize())
            self.newItem.emit(row_id)
        else:
            # deleted and conflicted
            log.debug("Not processable state: %r", state)

    @pyqtSlot()
    def _on_error_timer(self):
        for item in self._on_error_queue.process_items():
            doc_pair = item.get()
            queueItem = QueueItem(doc_pair.id, doc_pair.folderish, doc_pair.pair_state)
            log.debug('Retrying blacklisted doc_pair: %r', doc_pair)
            self.push(queueItem)

        if self._on_error_queue.is_empty():
            self._error_timer.stop()
            log.debug('blacklist queue timer stopped')

    def _is_on_error(self, row_id):
        return self._on_error_queue.exists(row_id)

    @pyqtSlot()
    def _on_new_error(self):
        self._error_timer.start(1000)
        log.debug('blacklist queue timer started')

    def get_errors_count(self):
        return self._on_error_queue.size()

    def get_error_threshold(self):
        return self._error_threshold

    def push_error(self, doc_pair, exception=None):
        error_count = doc_pair.error_count
        if (exception is not None and type(exception) == WindowsError
            and hasattr(exception, 'winerror') and exception.winerror == WINERROR_CODE_PROCESS_CANNOT_ACCESS_FILE):
            log.debug("Detected WindowsError with code %d: '%s', won't increase next try interval",
                      WINERROR_CODE_PROCESS_CANNOT_ACCESS_FILE,
                      exception.strerror if hasattr(exception, 'strerror') else '')
            error_count = 1
        if error_count > self._error_threshold:
            self._on_error_queue.remove(doc_pair.id)
            # TODO this signal is not connected
            self.newErrorGiveUp.emit(doc_pair.id)
            log.debug("Giving up on pair : %r", doc_pair)
            return

        interval = self._on_error_queue.push(doc_pair.id, doc_pair, count=doc_pair.error_count)
        log.debug("Blacklisting pair for %ds (error count=%d): %r", interval, doc_pair.error_count, doc_pair)
        if not self._error_timer.isActive():
            self.newError.emit(doc_pair.id)

    def requeue_errors(self):
        for doc_pair in self._on_error_queue.items():
            doc_pair.error_next_try = 0

    def _get_local_folder(self):
        if self._local_folder_queue.empty():
            return None
        try:
            state = self._local_folder_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_local_folder()
        return state

    def _get_local_file(self):
        if self._local_file_queue.empty():
            return None
        try:
            state = self._local_file_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_local_file()
        return state

    def _get_remote_folder(self):
        if self._remote_folder_queue.empty():
            return None
        try:
            state = self._remote_folder_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_remote_folder()
        return state

    def _get_remote_file(self):
        if self._remote_file_queue.empty():
            return None
        try:
            state = self._remote_file_queue.get(True, 3)
        except Empty:
            return None
        if state is not None and self._is_on_error(state.id):
            return self._get_remote_file()
        return state

    def _get_file(self):
        self._get_file_lock.acquire()
        if self._remote_file_queue.empty() and self._local_file_queue.empty():
            self._get_file_lock.release()
            return None
        state = None
        if (self._remote_file_queue.qsize() > self._local_file_queue.qsize()):
            state = self._get_remote_file()
        else:
            state = self._get_local_file()
        self._get_file_lock.release()
        if state is not None and self._is_on_error(state.id):
            return self._get_file()
        return state

    @pyqtSlot()
    def _thread_finished(self):
        self._thread_inspection.acquire()
        try:
            for thread in self._processors_pool:
                if thread.isFinished():
                    self._processors_pool.remove(thread)
            if (self._local_folder_thread is not None and
                    self._local_folder_thread.isFinished()):
                self._local_folder_thread = None
            if (self._local_file_thread is not None and
                    self._local_file_thread.isFinished()):
                self._local_file_thread = None
            if (self._remote_folder_thread is not None and
                    self._remote_folder_thread.isFinished()):
                self._remote_folder_thread = None
            if (self._remote_file_thread is not None and
                    self._remote_file_thread.isFinished()):
                self._remote_file_thread = None
            if not self._engine.is_paused() and not self._engine.is_stopped():
                self.newItem.emit(None)
        finally:
            self._thread_inspection.release()

    def active(self):
        # Recheck threads
        self._thread_finished()
        return self.is_active()

    def is_active(self):
        return (self._local_folder_thread is not None
                or self._local_file_thread is not None
                or self._remote_file_thread is not None
                or self._remote_folder_thread is not None
                or len(self._processors_pool) > 0)

    def _create_thread(self, item_getter, name=None):
        processor = self._engine.create_processor(item_getter, name=name)
        thread = self._engine.create_thread(worker=processor)
        thread.finished.connect(self._thread_finished)
        thread.terminated.connect(self._thread_finished)
        thread.start()
        return thread

    def get_metrics(self):
        metrics = dict()
        metrics["local_folder_queue"] = self._local_folder_queue.qsize()
        metrics["local_file_queue"] = self._local_file_queue.qsize()
        metrics["remote_folder_queue"] = self._remote_folder_queue.qsize()
        metrics["remote_file_queue"] = self._remote_file_queue.qsize()
        metrics["remote_file_thread"] = self._remote_file_thread is not None
        metrics["remote_folder_thread"] = self._remote_folder_thread is not None
        metrics["local_file_thread"] = self._local_file_thread is not None
        metrics["local_folder_thread"] = self._local_folder_thread is not None
        metrics["error_queue"] = self.get_errors_count()
        metrics["total_queue"] = (metrics["local_folder_queue"] + metrics["local_file_queue"]
                                + metrics["remote_folder_queue"] + metrics["remote_file_queue"])
        metrics["additional_processors"] = len(self._processors_pool)
        return metrics

    def get_overall_size(self):
        return (self._local_folder_queue.qsize() + self._local_file_queue.qsize()
                + self._remote_folder_queue.qsize() + self._remote_file_queue.qsize())

    def is_processing_file(self, worker, path, exact_match=False):
        if not hasattr(worker, "_current_doc_pair"):
            return False
        doc_pair = worker._current_doc_pair
        if (doc_pair is None or doc_pair.local_path is None):
            return False
        if exact_match:
            result = doc_pair.local_path == path
        else:
            result = doc_pair.local_path.startswith(path)
        if result:
            log.trace("Worker(%r) is processing: %r", worker.get_metrics(), path)
        return result

    def interrupt_processors_on(self, path, exact_match=True):
        for proc in self.get_processors_on(path, exact_match):
            proc.stop()

    def get_processors_on(self, path, exact_match=True):
        self._thread_inspection.acquire()
        try:
            res = []
            if self._local_folder_thread is not None:
                if self.is_processing_file(self._local_folder_thread.worker, path, exact_match):
                    res.append(self._local_folder_thread.worker)
            if self._remote_folder_thread is not None:
                if self.is_processing_file(self._remote_folder_thread.worker, path, exact_match):
                    res.append(self._remote_folder_thread.worker)
            if self._local_file_thread is not None:
                if self.is_processing_file(self._local_file_thread.worker, path, exact_match):
                    res.append(self._local_file_thread.worker)
            if self._remote_file_thread is not None:
                if self.is_processing_file(self._remote_file_thread.worker, path, exact_match):
                    res.append(self._remote_file_thread.worker)
            for thread in self._processors_pool:
                if self.is_processing_file(thread.worker, path, exact_match):
                    res.append(thread.worker)
            return res
        finally:
            self._thread_inspection.release()

    def has_file_processors_on(self, path):
        self._thread_inspection.acquire()
        try:
            # First check local and remote file
            if self._local_file_thread is not None:
                if self.is_processing_file(self._local_file_thread.worker, path):
                    return True
            if self._remote_file_thread is not None:
                if self.is_processing_file(self._remote_file_thread.worker, path):
                    return True
            for thread in self._processors_pool:
                if self.is_processing_file(thread.worker, path):
                    return True
            return False
        finally:
            self._thread_inspection.release()

    @pyqtSlot()
    def launch_processors(self):
        if (self._disable or self.is_paused() or (self._local_folder_queue.empty() and self._local_file_queue.empty()
                and self._remote_folder_queue.empty() and self._remote_file_queue.empty())):
            self.queueEmpty.emit()
            if not self.is_active():
                self.queueFinishedProcessing.emit()
            return
        log.trace("Launching processors")
        if self._local_folder_thread is None and not self._local_folder_queue.empty() and self._local_folder_enable:
            log.debug("creating local folder processor")
            self._local_folder_thread = self._create_thread(self._get_local_folder, name="LocalFolderProcessor")
        if self._local_file_thread is None and not self._local_file_queue.empty() and self._local_file_enable:
            log.debug("creating local file processor")
            self._local_file_thread = self._create_thread(self._get_local_file, name="LocalFileProcessor")
        if self._remote_folder_thread is None and not self._remote_folder_queue.empty() and self._remote_folder_enable:
            log.debug("creating remote folder processor")
            self._remote_folder_thread = self._create_thread(self._get_remote_folder, name="RemoteFolderProcessor")
        if self._remote_file_thread is None and not self._remote_file_queue.empty() and self._remote_file_enable:
            log.debug("creating remote file processor")
            self._remote_file_thread = self._create_thread(self._get_remote_file, name="RemoteFileProcessor")
        if self._remote_file_queue.qsize() + self._local_file_queue.qsize() == 0:
            return
        while len(self._processors_pool) < self._max_processors:
            log.debug("creating additional file processor")
            self._processors_pool.append(self._create_thread(self._get_file, name="GenericProcessor"))
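
is_processing_file() above compares a worker's current local_path with the requested path either exactly or as a prefix; the prefix mode is what lets a call such as get_processors_on('/', exact_match=False) find every worker under the sync root. A minimal stand-alone sketch of that comparison (path_matches is a hypothetical helper name):

# Minimal sketch of the path test inside is_processing_file(); path_matches is
# a hypothetical name. Exact mode compares the whole path, prefix mode treats
# the argument as a subtree root.
def path_matches(local_path, path, exact_match=False):
    if exact_match:
        return local_path == path
    return local_path.startswith(path)

assert path_matches('/folder/file.txt', '/folder/file.txt', exact_match=True)
assert path_matches('/folder/file.txt', '/')           # prefix match on the root
assert not path_matches('/folder/file.txt', '/other')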