def _save_settings(self):
    '''
    Persist the currently edited value for the selected key into this
    library's database prefs, namespaced to this plugin, after asking the
    user to confirm, then offer to restart calibre.
    '''
    from calibre.gui2.dialogs.confirm_delete import confirm
    message = '<p>' + _('Are you sure you want to save this setting in this library for this plugin?') + '</p>' \
              + '<p>' + _('Any settings in other libraries or stored in a JSON file in your calibre plugins folder will not be touched.') + '</p>' \
              + '<p>' + _('You must restart calibre afterwards.') + '</p>'
    # confirm() can remember "don't ask again" under this namespaced key.
    if not confirm(message, self.namespace + '_save_settings', self):
        return
    # NOTE(review): ns_prefix is never used below -- confirm _get_ns_prefix()
    # has no required side effects before removing this call.
    ns_prefix = self._get_ns_prefix()
    key = unicode(self.keys_list.currentItem().text())
    # Convert the edited raw text back to an object before storing it
    # under this plugin's namespace in the library prefs.
    self.db.prefs.set_namespaced(self.namespace, key,
                                 self.db.prefs.raw_to_object(self.value_text.toPlainText()))
    d = info_dialog(self, 'Settings saved',
                    '<p>' + _('All settings for this plugin in this library have been saved.') + '</p>' \
                    + '<p>' + _('Please restart calibre now.') + '</p>',
                    show_copy_button=False)
    b = d.bb.addButton(_('Restart calibre now'), d.bb.AcceptRole)
    b.setIcon(QIcon(I('lt.png')))
    d.do_restart = False
    def rf():
        # Closure flag: set only if the user clicks "Restart calibre now".
        d.do_restart = True
    b.clicked.connect(rf)
    d.set_details('')
    d.exec_()
    # Disconnect before the button is destroyed with the dialog.
    b.clicked.disconnect()
    self.close()
    if d.do_restart:
        self.gui.quit(restart=True)
def _current_row_changed(self, new_row): if new_row < 0: self.value_text.clear() return key = unicode(self.keys_list.currentItem().text()) val = self.db.prefs.get_namespaced(self.namespace, key, '') self.value_text.setPlainText(self.db.prefs.to_raw(val))
def _read_epub_contents(iterator, strip_html=False): ''' Given an iterator for an ePub file, read the contents into a giant block of text ''' book_files = [] for path in iterator.spine: with open(path, 'rb') as f: html = f.read().decode('utf-8', 'replace') if strip_html: html = unicode(_extract_body_text(html)).strip() #print('FOUND HTML:', html) book_files.append(html) return ''.join(book_files)
def get_configuration(url,
                      passed_defaultsini,
                      passed_personalini,
                      options,
                      chaptercount=None,
                      output_filename=None):
    '''
    Build and return a Configuration for downloading *url*.

    Reads defaults.ini / personal.ini from the usual locations (or from
    the passed-in INI text), applies CLI-driven values into an
    'overrides' section, then wires up the browser cache, basic cache
    and cookiejar.

    NOTE: *options* is mutated -- browser_cache / basic_cache /
    cookiejar are stashed on it the first time through so later calls
    in the same run share them.

    chaptercount/output_filename are only used together with
    options.update to pin the output filename of an existing download.
    '''
    try:
        configuration = Configuration(adapters.getConfigSectionsFor(url),
                                      options.format)
    except exceptions.UnknownSite as e:
        if options.list or options.normalize or options.downloadlist:
            # list for page doesn't have to be a supported site.
            configuration = Configuration(['unknown'], options.format)
        else:
            raise

    conflist = []
    homepath = join(expanduser('~'), '.fanficdownloader')
    ## also look for .fanficfare now, give higher priority than old dir.
    homepath2 = join(expanduser('~'), '.fanficfare')
    xdgpath = os.environ.get('XDG_CONFIG_HOME', join(expanduser('~'), '.config'))
    xdgpath = join(xdgpath, 'fanficfare')

    if passed_defaultsini:
        # new StringIO each time rather than pass StringIO and rewind
        # for case of list download.  Just makes more sense to me.
        configuration.readfp(StringIO(unicode(passed_defaultsini)))
    else:
        # don't need to check existance for our selves.
        conflist.append(join(dirname(__file__), 'defaults.ini'))
        conflist.append(join(homepath, 'defaults.ini'))
        conflist.append(join(homepath2, 'defaults.ini'))
        conflist.append(join(xdgpath, 'defaults.ini'))
        conflist.append('defaults.ini')

    if passed_personalini:
        # new StringIO each time rather than pass StringIO and rewind
        # for case of list download.  Just makes more sense to me.
        configuration.readfp(StringIO(unicode(passed_personalini)))

    # personal.ini candidates are appended even when passed_personalini
    # was read above; configuration.read() skips files that don't exist.
    conflist.append(join(homepath, 'personal.ini'))
    conflist.append(join(homepath2, 'personal.ini'))
    conflist.append(join(xdgpath, 'personal.ini'))
    conflist.append('personal.ini')

    if options.configfile:
        conflist.extend(options.configfile)

    configuration.read(conflist)

    try:
        configuration.add_section('overrides')
    except configparser.DuplicateSectionError:
        # generally already exists in defaults.ini
        pass

    if options.force:
        configuration.set('overrides', 'always_overwrite', 'true')

    if options.update and chaptercount and output_filename:
        configuration.set('overrides', 'output_filename', output_filename)

    if options.update and not options.updatecover:
        configuration.set('overrides', 'never_make_cover', 'true')

    # images only for epub, even if the user mistakenly turned it
    # on else where.
    if options.format not in ('epub', 'html'):
        configuration.set('overrides', 'include_images', 'false')

    if options.options:
        # each entry is a "var=val" string from the command line.
        for opt in options.options:
            (var, val) = opt.split('=')
            configuration.set('overrides', var, val)

    if options.progressbar:
        configuration.set('overrides', 'progressbar', 'true')

    ## do page cache and cookie load after reading INI files because
    ## settings (like use_basic_cache) matter.

    ## only need browser cache if one of the URLs needs it, and it
    ## isn't saved or dependent on options.save_cache.  This needs to
    ## be above basic_cache to avoid loading more than once anyway.
    if configuration.getConfig('use_browser_cache'):
        if not hasattr(options, 'browser_cache'):
            configuration.get_fetcher()  # force browser cache read.
            options.browser_cache = configuration.get_browser_cache()
        else:
            configuration.set_browser_cache(options.browser_cache)

    ## Share basic_cache between multiple downloads.
    if not hasattr(options, 'basic_cache'):
        options.basic_cache = configuration.get_basic_cache()
        if options.save_cache:
            # best-effort load; a missing/corrupt cache file only warns.
            try:
                options.basic_cache.load_cache(global_cache)
            except Exception as e:
                logger.warning("Didn't load --save-cache %s\nContinue without loading BasicCache" % e)
            options.basic_cache.set_autosave(True, filename=global_cache)
    else:
        configuration.set_basic_cache(options.basic_cache)
    # logger.debug(options.basic_cache.basic_cache.keys())

    ## All CLI downloads are sequential and share one cookiejar,
    ## loaded the first time through here.
    if not hasattr(options, 'cookiejar'):
        options.cookiejar = configuration.get_cookiejar()
        if options.save_cache:
            # best-effort load; a missing/corrupt cookie file only warns.
            try:
                options.cookiejar.load_cookiejar(global_cookies)
            except Exception as e:
                logger.warning("Didn't load --save-cache %s\nContinue without loading cookies" % e)
            options.cookiejar.set_autosave(True, filename=global_cookies)
    else:
        configuration.set_cookiejar(options.cookiejar)

    return configuration
def selected_key(self):
    '''
    Return the key in self.values whose value equals the combo's current
    text (whitespace-stripped), or None when nothing matches.
    '''
    # Hoist the loop-invariant widget read out of the loop: the original
    # re-fetched and re-stripped currentText() once per dict entry.
    current = unicode(self.currentText()).strip()
    for key, value in six.iteritems(self.values):
        if value == current:
            return key
def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
    '''
    Child job, to download story when run as a worker job

    *book* is a dict describing one story (url, collision mode,
    credentials, output file...); it is updated in place with metadata,
    status and comment, and returned.  *options* is a dict of job-wide
    settings shared by the background process.  *merge* indicates an
    anthology-merge operation.  *notification* is a progress callback.

    Never raises to the caller: NotGoingToDownload and any other
    exception are recorded on the returned book dict instead.
    '''
    from calibre_plugins.fanficfare_plugin import FanFicFareBase
    fffbase = FanFicFareBase(options['plugin_path'])
    with fffbase: # so the sys.path was modified while loading the plug impl.
        from calibre_plugins.fanficfare_plugin.dialogs import NotGoingToDownload
        from calibre_plugins.fanficfare_plugin.prefs import (OVERWRITE, OVERWRITEALWAYS, UPDATE,
                                                             UPDATEALWAYS, ADDNEW, SKIP,
                                                             CALIBREONLY, CALIBREONLYSAVECOL)
        # Prefer the plugin-bundled fanficfare; fall back to an installed one.
        try:
            from calibre_plugins.fanficfare_plugin.fanficfare import adapters, writers
            from calibre_plugins.fanficfare_plugin.fanficfare.epubutils import get_update_data
        except:
            from fanficfare import adapters, writers
            from fanficfare.epubutils import get_update_data
        from calibre_plugins.fanficfare_plugin.fff_util import get_fff_config

        try:
            logger.info("\n\n" + ("-"*80) + " " + book['url'])
            ## No need to download at all.  Can happen now due to
            ## collision moving into book for CALIBREONLY changing to
            ## ADDNEW when story URL not in library.
            if book['collision'] in (CALIBREONLY, CALIBREONLYSAVECOL):
                logger.info("Skipping CALIBREONLY 'update' down inside worker")
                return book

            book['comment'] = _('Download started...')

            configuration = get_fff_config(book['url'],
                                           options['fileform'],
                                           options['personal.ini'])

            if not options['updateepubcover'] and 'epub_for_update' in book and book['collision'] in (UPDATE, UPDATEALWAYS):
                configuration.set("overrides","never_make_cover","true")

            # images only for epub, html, even if the user mistakenly
            # turned it on else where.
            if options['fileform'] not in ("epub","html"):
                configuration.set("overrides","include_images","false")

            adapter = adapters.getAdapter(configuration,book['url'])
            adapter.is_adult = book['is_adult']
            adapter.username = book['username']
            adapter.password = book['password']
            adapter.setChaptersRange(book['begin'],book['end'])

            ## each site download job starts with a new copy of the
            ## cookiejar and basic_cache from the FG process.  They
            ## are not shared between different sites' BG downloads
            if configuration.getConfig('use_browser_cache'):
                if 'browser_cache' in options:
                    configuration.set_browser_cache(options['browser_cache'])
                else:
                    options['browser_cache'] = configuration.get_browser_cache()
                    if 'browser_cachefile' in options:
                        options['browser_cache'].load_cache(options['browser_cachefile'])
            if 'basic_cache' in options:
                configuration.set_basic_cache(options['basic_cache'])
            else:
                options['basic_cache'] = configuration.get_basic_cache()
                options['basic_cache'].load_cache(options['basic_cachefile'])
            if 'cookiejar' in options:
                configuration.set_cookiejar(options['cookiejar'])
            else:
                options['cookiejar'] = configuration.get_cookiejar()
                options['cookiejar'].load_cookiejar(options['cookiejarfile'])

            story = adapter.getStoryMetadataOnly()
            if not story.getMetadata("series") and 'calibre_series' in book:
                adapter.setSeries(book['calibre_series'][0],book['calibre_series'][1])

            # set PI version instead of default.
            if 'version' in options:
                story.setMetadata('version',options['version'])

            # Copy story metadata onto the book dict for calibre's use.
            book['title'] = story.getMetadata("title", removeallentities=True)
            book['author_sort'] = book['author'] = story.getList("author", removeallentities=True)
            book['publisher'] = story.getMetadata("publisher")
            book['url'] = story.getMetadata("storyUrl", removeallentities=True)
            book['tags'] = story.getSubjectTags(removeallentities=True)
            book['comments'] = story.get_sanitized_description()
            book['series'] = story.getMetadata("series", removeallentities=True)

            if story.getMetadataRaw('datePublished'):
                book['pubdate'] = story.getMetadataRaw('datePublished').replace(tzinfo=local_tz)
            if story.getMetadataRaw('dateUpdated'):
                book['updatedate'] = story.getMetadataRaw('dateUpdated').replace(tzinfo=local_tz)
            if story.getMetadataRaw('dateCreated'):
                book['timestamp'] = story.getMetadataRaw('dateCreated').replace(tzinfo=local_tz)
            else:
                # need *something* there for calibre.
                book['timestamp'] = datetime.now().replace(tzinfo=local_tz)

            writer = writers.getWriter(options['fileform'],configuration,adapter)
            outfile = book['outfile']

            ## checks were done earlier, it's new or not dup or newer--just write it.
            if book['collision'] in (ADDNEW, SKIP, OVERWRITE, OVERWRITEALWAYS) or \
                    ('epub_for_update' not in book and book['collision'] in (UPDATE, UPDATEALWAYS)):

                # preserve logfile even on overwrite.
                if 'epub_for_update' in book:
                    adapter.logfile = get_update_data(book['epub_for_update'])[6]
                    # change the existing entries id to notid so
                    # write_epub writes a whole new set to indicate overwrite.
                    if adapter.logfile:
                        adapter.logfile = adapter.logfile.replace("span id","span notid")

                if book['collision'] == OVERWRITE and 'fileupdated' in book:
                    lastupdated=story.getMetadataRaw('dateUpdated')
                    fileupdated=book['fileupdated']

                    # updated doesn't have time (or is midnight), use dates only.
                    # updated does have time, use full timestamps.
                    if (lastupdated.time() == time.min and fileupdated.date() > lastupdated.date()) or \
                            (lastupdated.time() != time.min and fileupdated > lastupdated):
                        raise NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)

                logger.info("write to %s"%outfile)
                inject_cal_cols(book,story,configuration)
                writer.writeStory(outfilename=outfile, forceOverwrite=True, notification=notification)

                if adapter.story.chapter_error_count > 0:
                    book['comment'] = _('Download %(fileform)s completed, %(failed)s failed chapters, %(total)s total chapters.')%\
                        {'fileform':options['fileform'],
                         'failed':adapter.story.chapter_error_count,
                         'total':story.getMetadata("numChapters")}
                    book['chapter_error_count'] = adapter.story.chapter_error_count
                else:
                    book['comment'] = _('Download %(fileform)s completed, %(total)s chapters.')%\
                        {'fileform':options['fileform'],
                         'total':story.getMetadata("numChapters")}
                book['all_metadata'] = story.getAllMetadata(removeallentities=True)
                if options['savemetacol'] != '':
                    book['savemetacol'] = story.dump_html_metadata()

            ## checks were done earlier, just update it.
            elif 'epub_for_update' in book and book['collision'] in (UPDATE, UPDATEALWAYS):
                # update now handled by pre-populating the old images and
                # chapters in the adapter rather than merging epubs.
                #urlchaptercount = int(story.getMetadata('numChapters').replace(',',''))
                # returns int adjusted for start-end range.
                urlchaptercount = story.getChapterCount()
                (url,
                 chaptercount,
                 adapter.oldchapters,
                 adapter.oldimgs,
                 adapter.oldcover,
                 adapter.calibrebookmark,
                 adapter.logfile,
                 adapter.oldchaptersmap,
                 adapter.oldchaptersdata) = get_update_data(book['epub_for_update'])[0:9]

                # dup handling from fff_plugin needed for anthology updates.
                if book['collision'] == UPDATE:
                    if chaptercount == urlchaptercount:
                        if merge:
                            book['comment']=_("Already contains %d chapters. Reuse as is.")%chaptercount
                            book['all_metadata'] = story.getAllMetadata(removeallentities=True)
                            if options['savemetacol'] != '':
                                book['savemetacol'] = story.dump_html_metadata()
                            book['outfile'] = book['epub_for_update'] # for anthology merge ops.
                            return book
                        else: # not merge,
                            raise NotGoingToDownload(_("Already contains %d chapters.")%chaptercount,'edit-undo.png',showerror=False)
                    elif chaptercount > urlchaptercount:
                        raise NotGoingToDownload(_("Existing epub contains %d chapters, web site only has %d. Use Overwrite to force update.") % (chaptercount,urlchaptercount),'dialog_error.png')
                    elif chaptercount == 0:
                        raise NotGoingToDownload(_("FanFicFare doesn't recognize chapters in existing epub, epub is probably from a different source. Use Overwrite to force update."),'dialog_error.png')

                if not (book['collision'] == UPDATEALWAYS and chaptercount == urlchaptercount) \
                        and adapter.getConfig("do_update_hook"):
                    chaptercount = adapter.hookForUpdates(chaptercount)

                logger.info("Do update - epub(%d) vs url(%d)" % (chaptercount, urlchaptercount))
                logger.info("write to %s"%outfile)

                inject_cal_cols(book,story,configuration)
                writer.writeStory(outfilename=outfile, forceOverwrite=True, notification=notification)

                if adapter.story.chapter_error_count > 0:
                    book['comment'] = _('Update %(fileform)s completed, added %(added)s chapters, %(failed)s failed chapters, for %(total)s total.')%\
                        {'fileform':options['fileform'],
                         'failed':adapter.story.chapter_error_count,
                         'added':(urlchaptercount-chaptercount),
                         'total':urlchaptercount}
                    book['chapter_error_count'] = adapter.story.chapter_error_count
                else:
                    book['comment'] = _('Update %(fileform)s completed, added %(added)s chapters for %(total)s total.')%\
                        {'fileform':options['fileform'],'added':(urlchaptercount-chaptercount),'total':urlchaptercount}
                book['all_metadata'] = story.getAllMetadata(removeallentities=True)
                if options['savemetacol'] != '':
                    book['savemetacol'] = story.dump_html_metadata()
            else:
                ## Shouldn't ever get here, but hey, it happened once
                ## before with prefs['collision']
                raise Exception("Impossible state reached -- Book: %s:\nOptions:%s:"%(book,options))

            # Optionally (re)compute the word count from the written file
            # and rewrite the output with it.
            if options['do_wordcount'] == SAVE_YES or (
                options['do_wordcount'] == SAVE_YES_UNLESS_SITE and not story.getMetadataRaw('numWords') ):
                wordcount = get_word_count(outfile)
                # logger.info("get_word_count:%s"%wordcount)
                story.setMetadata('numWords',wordcount)
                writer.writeStory(outfilename=outfile, forceOverwrite=True)
                book['all_metadata'] = story.getAllMetadata(removeallentities=True)
                if options['savemetacol'] != '':
                    book['savemetacol'] = story.dump_html_metadata()

            if options['smarten_punctuation'] and options['fileform'] == "epub" \
                    and calibre_version >= (0, 9, 39):
                # for smarten punc
                from calibre.ebooks.oeb.polish.main import polish, ALL_OPTS
                from calibre.utils.logging import Log
                from collections import namedtuple

                # do smarten_punctuation from calibre's polish feature
                data = {'smarten_punctuation':True}
                opts = ALL_OPTS.copy()
                opts.update(data)
                O = namedtuple('Options', ' '.join(six.iterkeys(ALL_OPTS)))
                opts = O(**opts)

                log = Log(level=Log.DEBUG)
                polish({outfile:outfile}, opts, log, logger.info)

        except NotGoingToDownload as d:
            # Expected skip: record why, keep the icon/showerror hints.
            book['good']=False
            book['status']=_('Bad')
            book['showerror']=d.showerror
            book['comment']=unicode(d)
            book['icon'] = d.icon

        except Exception as e:
            # Unexpected failure: record and log with traceback.
            book['good']=False
            book['status']=_('Error')
            book['comment']=unicode(e)
            book['icon']='dialog_error.png'
            book['status'] = _('Error')
            logger.info("Exception: %s:%s"%(book,book['comment']),exc_info=True)

    return book