Exemplo n.º 1
0
 def wait(self, timeout=30):
     """Block until every queued request has been processed.

     :param timeout: seconds to wait; ``None`` blocks indefinitely.
     :return: ``False`` if the wait timed out, ``True`` otherwise.
     """
     if timeout is not None:
         try:
             join_with_timeout(self.requests, timeout)
         except RuntimeError:
             # join_with_timeout signals expiry via RuntimeError
             return False
         return True
     self.requests.join()
     return True
Exemplo n.º 2
0
Arquivo: save.py — Projeto: kba/calibre
 def wait(self, timeout=30):
     """Wait for all outstanding requests to finish.

     :param timeout: maximum seconds to wait; ``None`` waits forever.
     :return: ``True`` when the queue drained, ``False`` on timeout.
     """
     if timeout is None:
         # Unbounded wait on the underlying queue.
         self.requests.join()
         return True
     try:
         join_with_timeout(self.requests, timeout)
     except RuntimeError:
         return False
     return True
Exemplo n.º 3
0
 def wait_for_tasks(self, timeout=None):
     """Block until every queued job has completed.

     :param timeout: maximum seconds to wait; ``None`` waits indefinitely.
     :raises RuntimeError: if *timeout* elapses with jobs still pending.
     :raises Failure: if a terminal failure was recorded earlier.
     """
     if self.failed:
         raise Failure(self.terminal_failure)
     if timeout is not None:
         join_with_timeout(self.tracker, timeout)
     else:
         self.tracker.join()
Exemplo n.º 4
0
 def wait_for_tasks(self, timeout=None):
     """Wait for all queued jobs to complete.

     With a non-``None`` *timeout*, a RuntimeError is raised if the jobs
     do not finish in time.  A previously recorded terminal failure is
     re-raised as :class:`Failure` before any waiting happens.
     """
     if self.failed:
         raise Failure(self.terminal_failure)
     if timeout is None:
         self.tracker.join()
         return
     join_with_timeout(self.tracker, timeout)
Exemplo n.º 5
0
    def monitor_pool(self):
        """One polling step of the metadata-read pool.

        Tries to pull a single worker result (waiting up to 50ms).  If the
        queue is empty, checks whether all tasks and all results have
        drained, and if so hands off to duplicate processing and stops.
        Otherwise processes the one result and re-emits do_one_signal so
        this method runs again.
        """
        try:
            # Blocking get with a short timeout; task_done immediately so
            # the results queue's join() accounting stays balanced.
            worker_result = self.pool.results.get(True, 0.05)
            self.pool.results.task_done()
        except Empty:
            # No result available: see whether the pool has finished.
            try:
                self.pool.wait_for_tasks(timeout=0.01)
            except RuntimeError:
                pass  # Tasks still remaining
            except Failure as err:
                # Terminal failure in the pool: report and cancel the
                # progress dialog.
                error_dialog(
                    self.pd,
                    _("Cannot add books"),
                    _('Failed to add some books, click "Show details" for more information.'),
                    # NOTE(review): 'unicode' is a Python-2-only builtin —
                    # confirm this file targets Python 2.
                    det_msg=unicode(err.failure_message) + "\n" + unicode(err.details),
                    show=True,
                )
                self.pd.canceled = True
            else:
                # All tasks completed
                try:
                    join_with_timeout(self.pool.results, 0.01)
                except RuntimeError:
                    pass  # There are results remaining
                else:
                    # No results left
                    # Everything drained: move on and do NOT re-arm the
                    # polling signal below.
                    self.process_duplicates()
                    return
        else:
            group_id = worker_result.id
            if worker_result.is_terminal_failure:
                # Worker process crashed: show which file group caused it
                # and abort the whole add operation.
                error_dialog(
                    self.pd,
                    _("Critical failure"),
                    _(
                        "The read metadata worker process crashed while processing"
                        ' some files. Adding of books is aborted. Click "Show details"'
                        " to see which files caused the problem."
                    ),
                    show=True,
                    det_msg="\n".join(self.file_groups[group_id]),
                )
                self.pd.canceled = True
            else:
                # Normal result: record it, converting any per-group error
                # into a metadata-failure report instead of crashing.
                try:
                    self.process_result(group_id, worker_result.result)
                except Exception:
                    self.report_metadata_failure(group_id, traceback.format_exc())
                self.pd.value += 1

        # Re-arm the polling loop (skipped only by the early return above).
        self.do_one_signal.emit()
Exemplo n.º 6
0
    def monitor_pool(self):
        """Poll the metadata-read pool once.

        Consumes at most one worker result (50ms wait).  On an empty queue
        it checks whether tasks and results have all drained; if so it
        starts duplicate processing and returns without re-arming the
        polling signal.  Any consumed result is processed inline.
        """
        try:
            # task_done() right after get() keeps the queue's join()
            # accounting balanced.
            worker_result = self.pool.results.get(True, 0.05)
            self.pool.results.task_done()
        except Empty:
            # Nothing ready yet: probe pool completion with tiny timeouts.
            try:
                self.pool.wait_for_tasks(timeout=0.01)
            except RuntimeError:
                pass  # Tasks still remaining
            except Failure as err:
                # The pool recorded a terminal failure: surface it and
                # cancel the progress dialog.
                error_dialog(
                    self.pd,
                    _('Cannot add books'),
                    _('Failed to add some books, click "Show details" for more information.'
                      ),
                    det_msg=unicode_type(err.failure_message) + '\n' +
                    unicode_type(err.details),
                    show=True)
                self.pd.canceled = True
            else:
                # All tasks completed
                try:
                    join_with_timeout(self.pool.results, 0.01)
                except RuntimeError:
                    pass  # There are results remaining
                else:
                    # No results left
                    # Fully drained: hand off and skip the signal re-emit.
                    self.process_duplicates()
                    return
        else:
            group_id = worker_result.id
            if worker_result.is_terminal_failure:
                # Worker crash: report the offending file group and abort.
                error_dialog(
                    self.pd,
                    _('Critical failure'),
                    _('The read metadata worker process crashed while processing'
                      ' some files. Adding of books is aborted. Click "Show details"'
                      ' to see which files caused the problem.'),
                    show=True,
                    det_msg='\n'.join(self.file_groups[group_id]))
                self.pd.canceled = True
            else:
                # Normal result: any processing error becomes a per-group
                # metadata failure report rather than an exception.
                try:
                    self.process_result(group_id, worker_result.result)
                except Exception:
                    self.report_metadata_failure(group_id,
                                                 traceback.format_exc())
                self.pd.value += 1

        # Schedule the next polling iteration.
        self.do_one_signal.emit()
Exemplo n.º 7
0
 def set_database(self, newdb, stage=0):
     """Re-point the cover and thumbnail caches at *newdb*.

     Stage 0 detaches the caches from the old database, attaches them to
     the new one, and drains the cover render queue with a timeout; any
     other stage merely clears the in-memory cover cache.
     """
     if stage != 0:
         self.delegate.cover_cache.clear()
         return
     self.ignore_render_requests.set()
     try:
         for cache in (self.delegate.cover_cache, self.thumbnail_cache):
             self.model().db.new_api.remove_cover_cache(cache)
     except AttributeError:
         pass  # db is None
     for cache in (self.delegate.cover_cache, self.thumbnail_cache):
         newdb.new_api.add_cover_cache(cache)
     try:
         # Bounded wait: if the render thread is stuck we lose future
         # cover renders, but that is better than deadlocking here.
         join_with_timeout(self.delegate.render_queue)
     except RuntimeError:
         print('Cover rendering thread is stuck!')
     finally:
         self.ignore_render_requests.clear()
Exemplo n.º 8
0
 def set_database(self, newdb, stage=0):
     """Switch the cover/thumbnail caches over to *newdb*.

     When ``stage == 0`` the caches are moved from the old database to
     the new one and the render queue is drained (with a timeout so a
     wedged render thread cannot deadlock us); otherwise only the cover
     cache is emptied.
     """
     if stage == 0:
         self.ignore_render_requests.set()
         try:
             for c in (self.delegate.cover_cache, self.thumbnail_cache):
                 self.model().db.new_api.remove_cover_cache(c)
         except AttributeError:
             pass  # previous db was None
         for c in (self.delegate.cover_cache, self.thumbnail_cache):
             newdb.new_api.add_cover_cache(c)
         try:
             join_with_timeout(self.delegate.render_queue)
         except RuntimeError:
             print('Cover rendering thread is stuck!')
         finally:
             # Always resume render requests, stuck thread or not.
             self.ignore_render_requests.clear()
     else:
         self.delegate.cover_cache.clear()