def _convert_books(self, rows):  # NODOC
    db = self.gui.current_db
    if not rows or len(rows) == 0:
        return error_dialog(self.gui, _('Cannot convert'),
                            _('No books selected'), show=True)
    if self.gui.current_view() is self.gui.library_view:
        ids = list(map(self.gui.library_view.model().id, rows))
        for book_id in ids:
            if db.has_format(book_id, 'DJVU', index_is_id=True):
                continue
            if db.has_format(book_id, 'PDF', index_is_id=True):
                path_to_ebook = db.format_abspath(book_id, 'pdf', index_is_id=True)
                job = ThreadedJob('ConvertToDJVU',
                                  'Converting %s to DJVU' % path_to_ebook,
                                  func=self._tjob_djvu_convert,
                                  args=(db, book_id, None, 'pdf'),  # by book_id!
                                  kwargs={},
                                  callback=self._tjob_refresh_books)
                # there is an assumed log=GUILog()!  See src/calibre/utils/logging.py
                self.gui.job_manager.run_threaded_job(job)
                # too bad console utils and filetype plugins can't start a job
                # manager; fork_job is a wretch
    else:  # not the GUI library view
        # Looking at a device's flash contents or some other non-library store;
        # file paths here are not tracked in the db.
        fpaths = self.gui.current_view().model().paths(rows)
        for fpath in fpaths:
            job = ThreadedJob('ConvertToDJVU',
                              'Converting %s to DJVU' % fpath,
                              func=self._tjob_djvu_convert,
                              args=(None, None, fpath, 'pdf'),  # by fpath!
                              kwargs={},
                              callback=None)  # ThreadedJob requires a callback argument
            self.gui.job_manager.run_threaded_job(job)
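The comment about the assumed log=GUILog() above points at how ThreadedJob invokes its func: calibre's ThreadedJob (src/calibre/gui2/threaded_jobs.py) injects abort, log and notifications keyword arguments before calling it, so worker callables such as _tjob_djvu_convert have to accept them. The sketch below is illustrative only; the worker name and its first four parameters are made up for this example and are not part of any plugin quoted here.

def example_djvu_worker(db, book_id, fpath, fmt,
                        abort=None, log=None, notifications=None):
    # calibre passes: abort -- a threading.Event that is set when the user
    # kills the job; log -- the job's log (a GUILog); notifications -- a
    # Queue taking (fraction_done, message) tuples for the progress display.
    total_steps = 10
    for step in range(total_steps):
        if abort is not None and abort.is_set():
            return None  # stop as soon as possible when the job is killed
        # ... do one unit of the real conversion work here ...
        if notifications is not None:
            notifications.put(((step + 1) / total_steps,
                               'Processed chunk %d of %d' % (step + 1, total_steps)))
    if log is not None:
        log('Conversion finished')
    return fpath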
def send_files(self):
    '''Sends files, depending on the user's settings'''
    xray_creator = self._get_books('Cannot send Files')
    if xray_creator:
        job = ThreadedJob('send_files', 'Sending Files to Device',
                          xray_creator.send_files_event,
                          (self.gui.current_db.new_api,), {},
                          Dispatcher(self.sent_files))
        self.gui.job_manager.run_threaded_job(job)
def create_files(self):
    '''Creates files, depending on the user's settings'''
    xray_creator = self._get_books('Cannot create Files')
    if xray_creator:
        job = ThreadedJob('create_files', 'Creating Files',
                          xray_creator.create_files_event,
                          (self.gui.current_db.new_api,), {},
                          Dispatcher(self.created_files))
        self.gui.job_manager.run_threaded_job(job)
def send_mails(jobnames, callback, attachments, to_s, subjects,
               texts, attachment_names, job_manager):
    for name, attachment, to, subject, text, aname in zip(
            jobnames, attachments, to_s, subjects, texts, attachment_names):
        description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
        job = ThreadedJob('email', description, gui_sendmail,
                          (attachment, aname, to, subject, text), {}, callback)
        job_manager.run_threaded_job(job)
def create_jobs(self, job=None, install=False):
    if self.job_failed(job):
        return
    for data in self.metadata_list:
        title = data[-1].get('title')
        job = ThreadedJob(
            "WordDumb's dumb job", f'Generating Word Wise for {title}',
            do_job, (data, install), {},
            Dispatcher(partial(self.done, data=data, title=title)))
        self.gui.job_manager.run_threaded_job(job)
def start_casanova_upload(callback, job_manager, gui, title, authors, description,
                          one_liner, issues, opf, file_loc, mi, book_id):
    # note: this rebinds the 'description' parameter that was passed in
    description = _('Adding %s') % title
    job = ThreadedJob('casanova_add', description, gui_casanova_adder,
                      (gui, title, authors, description, one_liner, issues,
                       opf, file_loc, mi, book_id),
                      {}, callback, max_concurrent_count=1, killable=False)
    job_manager.run_threaded_job(job)
def send_mails(jobnames, callback, attachments, to_s, subjects,
               texts, attachment_names, job_manager):
    for name, attachment, to, subject, text, aname in zip(
            jobnames, attachments, to_s, subjects, texts, attachment_names):
        description = _('Email %(name)s to %(to)s') % dict(name=name, to=to)
        if isinstance(to, str) and ('@pbsync.com' in to or '@kindle.com' in to):
            # The pbsync service chokes on non-ASCII filenames.
            # Don't know if Amazon's service chokes or not, but since filenames
            # aren't visible on Kindles anyway, might as well be safe.
            aname = ascii_filename(aname)
        job = ThreadedJob('email', description, gui_sendmail,
                          (attachment, aname, to, subject, text), {}, callback)
        job_manager.run_threaded_job(job)
def start_extract_threaded(gui, ids, callback):
    '''
    This approach to extracting an ISBN uses an in-process Thread to perform
    the work. This offers high performance, but suffers from memory leaks in
    the Calibre conversion process and will make the GUI less responsive for
    large numbers of books. It is retained only for the purposes of extracting
    a single ISBN, as it is considerably faster than the out-of-process approach.
    '''
    job = ThreadedJob('extract isbn plugin',
                      _('Extract ISBN for %d books') % len(ids),
                      extract_threaded, (ids, gui.current_db), {}, callback)
    gui.job_manager.run_threaded_job(job)
    gui.status_bar.show_message(_('Extract ISBN started'), 3000)
def parse(self):
    # get currently selected books
    rows = self.gui.library_view.selectionModel().selectedRows()
    if not rows or len(rows) == 0:
        return
    self.ids = list(map(self.gui.library_view.model().id, rows))
    if len(self.ids) == 0:
        return
    job = ThreadedJob('Generating Word Wise', 'Generating Word Wise', do_job,
                      (self.gui.current_db.new_api, self.ids, self.plugin_path),
                      {}, Dispatcher(self.done))
    self.gui.job_manager.run_threaded_job(job)
    self.gui.status_bar.show_message("Generating Word Wise")
def start_casanova_download(callback, job_manager, gui, url='', filename='',
                            save_loc='', id=False):
    description = (_('Downloading %s') % filename.decode('utf-8', 'ignore')
                   if filename else url.decode('utf-8', 'ignore'))
    job = ThreadedJob('casanova_download', description, gui_casanova_download,
                      (gui, url, filename, save_loc, id), {}, callback,
                      max_concurrent_count=2, killable=False)
    job_manager.run_threaded_job(job)
def startImport(self):
    from pprint import pprint
    plugin_prefs = JSONConfig('plugins/Mendeley')
    job = ThreadedJob('Mendeley_importer', 'Importing Mendeley Documents',
                      func=do_work, args=(), kwargs={},
                      callback=self.importer_finished)
    self.gui.job_manager.run_threaded_job(job)
    self.startImportButton.setEnabled(False)
    self.helpl.setText(
        'Importing documents. You can close this dialog; progress is shown '
        'in the Calibre jobs list (see the status bar).')
class ParseBook:
    def __init__(self, gui):
        self.gui = gui
        self.metadata_list = []

    def parse(self, create_ww=True, create_x=True):
        # get currently selected books
        rows = self.gui.library_view.selectionModel().selectedRows()
        if not rows or len(rows) == 0:
            return
        ids = list(map(self.gui.library_view.model().id, rows))
        if len(ids) == 0:
            return
        for book_id in ids:
            if (data := check_metadata(self.gui.current_db.new_api, book_id)) is None:
                continue
            self.metadata_list.append(data)
        if len(self.metadata_list) == 0:
            return
        for data in self.metadata_list:
            if data[-1]['wiki'] != 'en':
                create_ww = False
            title = data[-2].get('title')
            notif = []
            if create_ww:
                notif.append('Word Wise')
            if create_x:
                notif.append('X-Ray')
            notif = ' and '.join(notif)
            job = ThreadedJob(
                "WordDumb's dumb job", f'Generating {notif} for {title}',
                do_job, (data, create_ww, create_x), {},
                Dispatcher(partial(self.done, data=data,
                                   notif=f'{notif} generated for {title}')))
            self.gui.job_manager.run_threaded_job(job)
        self.gui.jobs_pointer.start()
def start_ebook_download(callback, job_manager, gui, cookie_file=None, url='',
                         filename='', save_loc='', add_to_lib=True, tags=[]):
    description = (_('Downloading %s') % filename.decode('utf-8', 'ignore')
                   if filename else url.decode('utf-8', 'ignore'))
    job = ThreadedJob(
        'ebook_download', description, gui_ebook_download,
        (gui, cookie_file, url, filename, save_loc, add_to_lib, tags), {},
        callback, max_concurrent_count=2, killable=False)
    job_manager.run_threaded_job(job)
def start_ebook_download(callback, job_manager, gui, cookie_file=None, url='',
                         filename='', save_loc='', add_to_lib=True, tags=[],
                         create_browser=None):
    description = _('Downloading %s') % as_unicode(filename or url, errors='replace')
    job = ThreadedJob('ebook_download', description, gui_ebook_download,
                      (gui, cookie_file, url, filename, save_loc, add_to_lib,
                       tags, create_browser),
                      {}, callback, max_concurrent_count=2, killable=False)
    job_manager.run_threaded_job(job)
        return
    for book_id in ids:
        if (data := check_metadata(self.gui.current_db.new_api, book_id)) is None:
            continue
        self.metadata_list.append(data)
    if (books := len(self.metadata_list)) == 0:
        return
    if books == 1:
        self.create_jobs(install=True)
    else:
        job = ThreadedJob("WordDumb's dumb job", 'Install dependencies',
                          install_libs, (), {}, Dispatcher(self.create_jobs))
        self.gui.job_manager.run_threaded_job(job)
    self.gui.jobs_pointer.start()
    self.gui.status_bar.show_message(f'Generating Word Wise for {books} '
                                     f'{"books" if books > 1 else "book"}')

def create_jobs(self, job=None, install=False):
    if self.job_failed(job):
        return
    for data in self.metadata_list:
        title = data[-1].get('title')
        job = ThreadedJob(
            "WordDumb's dumb job", f'Generating Word Wise for {title}',
            do_job, (data, install), {},
            Dispatcher(partial(self.done, data=data, title=title)))
        self.gui.job_manager.run_threaded_job(job)
def get_upload_books_job(books, db):
    return ThreadedJob('remarkablecloud', 'Upload files to reMarkable Cloud',
                       upload_books, (books, db), {}, None)