def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
    self.IS_HTML = False
    date = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
    masthead_p = etree.Element("p")
    masthead_p.set("style", "text-align:center")
    masthead_img = etree.Element("img")
    masthead_img.set("src", masthead)
    masthead_img.set("alt", "masthead")
    masthead_p.append(masthead_img)
    head = HEAD(TITLE(title))
    if style:
        head.append(STYLE(style, type='text/css'))
    if extra_css:
        head.append(STYLE(extra_css, type='text/css'))
    toc = TABLE(CLASS('toc'), width="100%", border="0", cellpadding="3px")
    for i, feed in enumerate(feeds):
        if feed:
            tr = TR()
            tr.append(TD(CLASS('calibre_rescale_120'), A(feed.title, href='feed_%d/index.html' % i)))
            tr.append(TD('%s' % len(feed.articles), style="text-align:right"))
            toc.append(tr)
    div = DIV(
        masthead_p,
        H3(CLASS('publish_date'), date),
        DIV(CLASS('divider')),
        toc)
    self.root = HTML(head, BODY(div))
    if self.html_lang:
        self.root.set('lang', self.html_lang)
def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
    self.IS_HTML = False
    if isinstance(datefmt, unicode):
        datefmt = datefmt.encode(preferred_encoding)
    date = "%s, %s %s, %s" % (strftime("%A"), strftime("%B"), strftime("%d").lstrip("0"), strftime("%Y"))
    masthead_p = etree.Element("p")
    masthead_p.set("style", "text-align:center")
    masthead_img = etree.Element("img")
    masthead_img.set("src", masthead)
    masthead_img.set("alt", "masthead")
    masthead_p.append(masthead_img)
    head = HEAD(TITLE(title))
    if style:
        head.append(STYLE(style, type="text/css"))
    if extra_css:
        head.append(STYLE(extra_css, type="text/css"))
    toc = TABLE(CLASS("toc"), width="100%", border="0", cellpadding="3px")
    for i, feed in enumerate(feeds):
        if feed:
            tr = TR()
            tr.append(TD(CLASS("calibre_rescale_120"), A(feed.title, href="feed_%d/index.html" % i)))
            tr.append(TD("%s" % len(feed.articles), style="text-align:right"))
            toc.append(tr)
    div = DIV(masthead_p, H3(CLASS("publish_date"), date), DIV(CLASS("divider")), toc)
    self.root = HTML(head, BODY(div))
    if self.html_lang:
        self.root.set("lang", self.html_lang)
def parse_index(self):
    if self.appURL == 'http://app.inthepoche.com':
        baseURL = self.appURL + '/u/' + self.username + '/'
    else:
        baseURL = self.appURL
    soup = self.index_to_soup(baseURL + 'index.php')
    articles = {}
    key = None
    ans = []
    for div in soup.findAll(True, attrs={'class': ['entrie']}):
        a = div.find('a', href=True)
        if not a:
            continue
        key = self.tag_to_string(div.find('a', attrs={'class': ['reading-time']}))
        url = baseURL + a['href']
        title = self.tag_to_string(a, use_alt=False)
        description = ''
        pubdate = strftime('%a, %d %b')
        summary = div.find('p')
        if summary:
            description = self.tag_to_string(summary, use_alt=False)
        feed = key if key is not None else 'Uncategorized'
        if feed not in articles.keys():
            articles[feed] = []
        articles[feed].append(dict(
            title=title, url=url, date=pubdate, description=description, content=''))
    ans = [(key, articles[key]) for key in articles.keys()]
    return ans
def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
    self.IS_HTML = False
    if isinstance(datefmt, unicode):
        datefmt = datefmt.encode(preferred_encoding)
    date = strftime(datefmt)
    head = HEAD(TITLE(title))
    if style:
        head.append(STYLE(style, type="text/css"))
    if extra_css:
        head.append(STYLE(extra_css, type="text/css"))
    ul = UL(CLASS("calibre_feed_list"))
    for i, feed in enumerate(feeds):
        if feed:
            li = LI(
                A(feed.title, CLASS("feed", "calibre_rescale_120", href="feed_%d/index.html" % i)),
                id="feed_%d" % i
            )
            ul.append(li)
    div = DIV(
        PT(IMG(src=masthead, alt="masthead"), style="text-align:center"),
        PT(date, style="text-align:right"),
        ul,
        CLASS("calibre_rescale_100"),
    )
    self.root = HTML(head, BODY(div))
    if self.html_lang:
        self.root.set("lang", self.html_lang)
def fix_pubdates(self):
    from calibre.utils.date import parse_date, strptime
    dirtied = False
    opf = self.container.opf
    for dcdate in opf.xpath('//dc:date', namespaces={'dc': 'http://purl.org/dc/elements/1.1/'}):
        raw = dcdate.text
        if not raw:
            raw = ''
        default = strptime('2000-1-1', '%Y-%m-%d', as_utc=True)
        try:
            ts = parse_date(raw, assume_utc=False, as_utc=True, default=default)
        except:
            raise InvalidEpub('Invalid date set in OPF', raw)
        try:
            sval = ts.strftime('%Y-%m-%d')
        except:
            from calibre import strftime
            sval = strftime('%Y-%m-%d', ts.timetuple())
        if sval != raw:
            self.log.error('OPF contains date', raw, 'that epubcheck does not like')
            if self.fix:
                dcdate.text = sval
                self.log('\tReplaced', raw, 'with', sval)
                dirtied = True
    if dirtied:
        self.container.set(self.container.opf_name, opf)
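# Hedged illustration (not calibre code): fix_pubdates() above parses a possibly
# malformed OPF date with a fallback default, then re-serializes it as YYYY-MM-DD.
# This standalone stdlib sketch mirrors that flow; 'normalise_opf_date' is a
# made-up helper name used only for illustration.
from datetime import datetime

def normalise_opf_date(raw, default='2000-01-01'):
    # Try ISO-8601 first, fall back to a fixed default (as fix_pubdates does).
    try:
        ts = datetime.fromisoformat(raw)
    except (ValueError, TypeError):
        ts = datetime.fromisoformat(default)
    return ts.strftime('%Y-%m-%d')

print(normalise_opf_date('2011-05-07T12:30:00'))  # 2011-05-07
print(normalise_opf_date('not a date'))           # 2000-01-01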
def test_winutil(self):
    from calibre.constants import plugins
    from calibre import strftime
    winutil = plugins['winutil'][0]

    def au(x, name):
        self.assertTrue(isinstance(x, unicode), name + '() did not return a unicode string')

    for x in winutil.argv():
        au(x, 'argv')
    for x in 'username temp_path locale_name'.split():
        au(getattr(winutil, x)(), x)
    d = winutil.localeconv()
    au(d['thousands_sep'], 'localeconv')
    au(d['decimal_point'], 'localeconv')
    for k, v in d.iteritems():
        au(v, k)
    for k in os.environ.keys():
        au(winutil.getenv(unicode(k)), 'getenv-' + k)
    os.environ['XXXTEST'] = 'YYY'
    self.assertEqual(winutil.getenv(u'XXXTEST'), u'YYY')
    del os.environ['XXXTEST']
    self.assertIsNone(winutil.getenv(u'XXXTEST'))
    t = time.localtime()
    fmt = u'%Y%a%b%e%H%M'
    for fmt in (fmt, fmt.encode('ascii')):
        x = strftime(fmt, t)
        au(x, 'strftime')
        self.assertEqual(unicode(time.strftime(fmt.replace('%e', '%#d'), t)), x)
def __init__(self, date):
    if date is not None:
        tableItem.__init__(self, strftime('%x', date))
        self.sort = date
    else:
        tableItem.__init__(self, '')
        self.sort = UNDEFINED_DATE
def populate_from_preparsed_feed(self, title, articles, oldest_article=7, max_articles_per_feed=100):
    self.title = unicode(title if title else _('Unknown feed'))
    self.description = ''
    self.image_url = None
    self.articles = []
    self.added_articles = []
    self.oldest_article = oldest_article
    self.id_counter = 0
    for item in articles:
        if len(self.articles) >= max_articles_per_feed:
            break
        id = item.get('id', 'internal id#' + str(self.id_counter))
        if id in self.added_articles:
            return
        self.added_articles.append(id)
        self.id_counter += 1
        published = time.gmtime(item.get('timestamp', time.time()))
        title = item.get('title', _('Untitled article'))
        link = item.get('url', None)
        description = item.get('description', '')
        content = item.get('content', '')
        author = item.get('author', '')
        article = Article(id, title, link, author, description, published, content)
        delta = utcnow() - article.utctime
        if delta.days*24*3600 + delta.seconds <= 24*3600*self.oldest_article:
            self.articles.append(article)
        else:
            t = strftime(u'%a, %d %b, %Y %H:%M', article.localtime.timetuple())
            self.logger.debug(u'Skipping article %s (%s) from feed %s as it is too old.' % (title, t, self.title))
        d = item.get('date', '')
        article.formatted_date = d
def extract_info(self, div):
    a = div.find('a', href=True)
    if a:
        url = self.base_url + a['href']
        title = BasicNewsRecipe.tag_to_string(a, use_alt=False)
        description = url
        pubdate = strftime('%a, %d %b')
        summary = div.find('p')
        if summary:
            description = BasicNewsRecipe.tag_to_string(summary, use_alt=False)
        return dict(title=title, url=url, date=pubdate, description=description, content='')
def _populate_pubdate(self):
    if 'pubdate' in self.mismatches:
        if self.mismatches['pubdate']['calibre']:
            cs_pubdate = "<b>Published:</b> {0}".format(strftime("%d %B %Y", t=self.mismatches['pubdate']['calibre']))
        else:
            cs_pubdate = "<b>Published:</b> Date unknown"
        self.calibre_pubdate.setText(self.YELLOW_BG.format(cs_pubdate))
        if self.mismatches['pubdate']['Marvin']:
            ms_pubdate = "<b>Published:</b> {0}".format(strftime("%d %B %Y", t=self.mismatches['pubdate']['Marvin']))
        else:
            ms_pubdate = "<b>Published:</b> Date unknown"
        self.marvin_pubdate.setText(self.YELLOW_BG.format(ms_pubdate))
    elif self.installed_book.pubdate:
        pubdate = "<b>Published:</b> {0}".format(strftime("%d %B %Y", t=self.installed_book.pubdate))
        self.calibre_pubdate.setText(pubdate)
        self.marvin_pubdate.setText(pubdate)
    else:
        pubdate = "<b>Published:</b> Date unknown"
        self.calibre_pubdate.setText(pubdate)
        self.marvin_pubdate.setText(pubdate)
def __init__(self, parent, ext, size, path=None, timestamp=None):
    self.path = path
    self.ext = ext
    self.size = float(size)/(1024*1024)
    text = '%s (%.2f MB)' % (self.ext.upper(), self.size)
    QListWidgetItem.__init__(self, file_icon_provider().icon_from_ext(ext),
                             text, parent, QListWidgetItem.UserType)
    if timestamp is not None:
        ts = timestamp.astimezone(local_tz)
        t = strftime('%a, %d %b %Y [%H:%M:%S]', ts.timetuple())
        text = _('Last modified: %s\n\nDouble click to view') % t
        self.setToolTip(text)
        self.setStatusTip(text)
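# Hedged illustration (not calibre code): the tooltip above renders a stored UTC
# timestamp in the user's local zone before formatting it. This stdlib-only sketch
# shows the same idea; 'format_local_mtime' is a made-up helper name.
from datetime import datetime, timezone
import time

def format_local_mtime(ts_utc):
    # Convert an aware UTC datetime to the machine's local time, then format it.
    local = ts_utc.astimezone()
    return time.strftime('%a, %d %b %Y [%H:%M:%S]', local.timetuple())

print(format_local_mtime(datetime(2024, 1, 2, 15, 4, 5, tzinfo=timezone.utc)))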
def do_test(self):
    from calibre.ebooks.metadata import authors_to_string
    from calibre.ebooks.metadata.meta import metadata_from_filename
    fname = unicode_type(self.filename.text())
    ext = os.path.splitext(fname)[1][1:].lower()
    if ext not in BOOK_EXTENSIONS:
        return warning_dialog(self, _('Test file name invalid'),
                _('The file name <b>%s</b> does not appear to end with a'
                  ' file extension. It must end with a file '
                  ' extension like .epub or .mobi') % fname, show=True)
    try:
        pat = self.pattern()
    except Exception as err:
        error_dialog(self, _('Invalid regular expression'),
                     _('Invalid regular expression: %s') % err).exec_()
        return
    mi = metadata_from_filename(fname, pat)
    if mi.title:
        self.title.setText(mi.title)
    else:
        self.title.setText(_('No match'))
    if mi.authors:
        self.authors.setText(authors_to_string(mi.authors))
    else:
        self.authors.setText(_('No match'))
    if mi.series:
        self.series.setText(mi.series)
    else:
        self.series.setText(_('No match'))
    if mi.series_index is not None:
        self.series_index.setText(str(mi.series_index))
    else:
        self.series_index.setText(_('No match'))
    if mi.publisher:
        self.publisher.setText(mi.publisher)
    else:
        self.publisher.setText(_('No match'))
    if mi.pubdate:
        self.pubdate.setText(strftime('%Y-%m-%d', mi.pubdate))
    else:
        self.pubdate.setText(_('No match'))
    self.isbn.setText(_('No match') if mi.isbn is None else str(mi.isbn))
    self.comments.setText(mi.comments if mi.comments else _('No match'))
def add_annotation_to_library(self, db, db_id, annotation): from calibre.ebooks.BeautifulSoup import Tag from calibre.ebooks.metadata import MetaInformation bm = annotation ignore_tags = set(['Catalog', 'Clippings']) if bm.type == 'kindle_bookmark': mi = db.get_metadata(db_id, index_is_id=True) user_notes_soup = self.generate_annotation_html(bm.value) if mi.comments: a_offset = mi.comments.find('<div class="user_annotations">') ad_offset = mi.comments.find('<hr class="annotations_divider" />') if a_offset >= 0: mi.comments = mi.comments[:a_offset] if ad_offset >= 0: mi.comments = mi.comments[:ad_offset] if set(mi.tags).intersection(ignore_tags): return if mi.comments: hrTag = Tag(user_notes_soup,'hr') hrTag['class'] = 'annotations_divider' user_notes_soup.insert(0, hrTag) mi.comments += unicode(user_notes_soup.prettify()) else: mi.comments = unicode(user_notes_soup.prettify()) # Update library comments db.set_comment(db_id, mi.comments) # Add bookmark file to db_id db.add_format_with_hooks(db_id, bm.value.bookmark_extension, bm.value.path, index_is_id=True) elif bm.type == 'kindle_clippings': # Find 'My Clippings' author=Kindle in database, or add last_update = 'Last modified %s' % strftime(u'%x %X',bm.value['timestamp'].timetuple()) mc_id = list(db.data.search_getting_ids('title:"My Clippings"', '')) if mc_id: db.add_format_with_hooks(mc_id[0], 'TXT', bm.value['path'], index_is_id=True) mi = db.get_metadata(mc_id[0], index_is_id=True) mi.comments = last_update db.set_metadata(mc_id[0], mi) else: mi = MetaInformation('My Clippings', authors = ['Kindle']) mi.tags = ['Clippings'] mi.comments = last_update db.add_books([bm.value['path']], ['txt'], [mi])
def add_annotation_to_library(self, db, db_id, annotation): from calibre.ebooks.BeautifulSoup import Tag from calibre.ebooks.metadata import MetaInformation bm = annotation ignore_tags = set(['Catalog', 'Clippings']) if bm.type == 'kindle_bookmark': mi = db.get_metadata(db_id, index_is_id=True) user_notes_soup = self.generate_annotation_html(bm.value) if mi.comments: a_offset = mi.comments.find('<div class="user_annotations">') ad_offset = mi.comments.find('<hr class="annotations_divider" />') if a_offset >= 0: mi.comments = mi.comments[:a_offset] if ad_offset >= 0: mi.comments = mi.comments[:ad_offset] if set(mi.tags).intersection(ignore_tags): return if mi.comments: hrTag = Tag(user_notes_soup,'hr') hrTag['class'] = 'annotations_divider' user_notes_soup.insert(0, hrTag) mi.comments += unicode(user_notes_soup.prettify()) else: mi.comments = unicode(user_notes_soup.prettify()) # Update library comments db.set_comment(db_id, mi.comments) # Add bookmark file to db_id db.add_format_with_hooks(db_id, bm.value.bookmark_extension, bm.value.path, index_is_id=True) elif bm.type == 'kindle_clippings': # Find 'My Clippings' author=Kindle in database, or add last_update = 'Last modified %s' % strftime(u'%x %X',bm.value['timestamp'].timetuple()) mc_id = list(db.data.search_getting_ids('title:"My Clippings"', '', sort_results=False)) if mc_id: db.add_format_with_hooks(mc_id[0], 'TXT', bm.value['path'], index_is_id=True) mi = db.get_metadata(mc_id[0], index_is_id=True) mi.comments = last_update db.set_metadata(mc_id[0], mi) else: mi = MetaInformation('My Clippings', authors=['Kindle']) mi.tags = ['Clippings'] mi.comments = last_update db.add_books([bm.value['path']], ['txt'], [mi])
def extract_info(self, div):
    a = div.find('a', href=True)
    if a:
        url = self.base_url + a['href']
        title = BasicNewsRecipe.tag_to_string(a, use_alt=False)
        description = url
        pubdate = strftime('%a, %d %b')
        summary = div.find('p')
        if summary:
            description = BasicNewsRecipe.tag_to_string(summary, use_alt=False)
        return dict(title=title, url=url, date=pubdate, description=description, content='')
def add_annotation_to_library(self, db, db_id, annotation): from calibre.ebooks.BeautifulSoup import Tag from calibre.ebooks.metadata import MetaInformation bm = annotation ignore_tags = set(["Catalog", "Clippings"]) if bm.type == "kindle_bookmark": mi = db.get_metadata(db_id, index_is_id=True) user_notes_soup = self.generate_annotation_html(bm.value) if mi.comments: a_offset = mi.comments.find('<div class="user_annotations">') ad_offset = mi.comments.find('<hr class="annotations_divider" />') if a_offset >= 0: mi.comments = mi.comments[:a_offset] if ad_offset >= 0: mi.comments = mi.comments[:ad_offset] if set(mi.tags).intersection(ignore_tags): return if mi.comments: hrTag = Tag(user_notes_soup, "hr") hrTag["class"] = "annotations_divider" user_notes_soup.insert(0, hrTag) mi.comments += unicode(user_notes_soup.prettify()) else: mi.comments = unicode(user_notes_soup.prettify()) # Update library comments db.set_comment(db_id, mi.comments) # Add bookmark file to db_id db.add_format_with_hooks(db_id, bm.value.bookmark_extension, bm.value.path, index_is_id=True) elif bm.type == "kindle_clippings": # Find 'My Clippings' author=Kindle in database, or add last_update = "Last modified %s" % strftime(u"%x %X", bm.value["timestamp"].timetuple()) mc_id = list(db.data.search_getting_ids('title:"My Clippings"', "")) if mc_id: db.add_format_with_hooks(mc_id[0], "TXT", bm.value["path"], index_is_id=True) mi = db.get_metadata(mc_id[0], index_is_id=True) mi.comments = last_update db.set_metadata(mc_id[0], mi) else: mi = MetaInformation("My Clippings", authors=["Kindle"]) mi.tags = ["Clippings"] mi.comments = last_update db.add_books([bm.value["path"]], ["txt"], [mi])
def parse_index(self): articles = [] feeds = [] soup = self.index_to_soup(self.INDEX) for feed_link in soup.findAll("a", "latestnews_menu"): url = self.INDEX + feed_link['href'] title = self.tag_to_string(feed_link) date = strftime(self.timefmt) articles.append({ 'title': title, 'date': date, 'url': url, 'description': '' }) if articles: feeds.append(('Latest Articles', articles)) return feeds
def parse_index(self):
    articles = []
    feeds = []
    soup = self.index_to_soup(self.INDEX)
    for feed_link in soup.findAll('a', href=True):
        if feed_link['href'].startswith('stories/'):
            url = self.INDEX + feed_link['href']
            title = self.tag_to_string(feed_link)
            date = strftime(self.timefmt)
            articles.append({
                'title': title,
                'date': date,
                'url': url,
                'description': ''
            })
    if articles:
        feeds.append(('Frontline', articles))
    return feeds
def parse_index(self): articles = [] feeds = [] soup = self.index_to_soup(self.INDEX) for feed_link in soup.findAll("a" , "latestnews_menu"): url = self.INDEX + feed_link['href'] title = self.tag_to_string(feed_link) date = strftime(self.timefmt) articles.append({ 'title' :title ,'date' :date ,'url' :url ,'description':'' }) if articles: feeds.append(('Latest Articles', articles)) return feeds
def populate_from_preparsed_feed(self, title, articles, oldest_article=7, max_articles_per_feed=100):
    self.title = str(title if title else _('Unknown feed'))
    self.description = ''
    self.image_url = None
    self.articles = []
    self.added_articles = []
    self.oldest_article = oldest_article
    self.id_counter = 0
    for item in articles:
        if len(self.articles) >= max_articles_per_feed:
            break
        self.id_counter += 1
        id = item.get('id', None)
        if not id:
            id = 'internal id#%s' % self.id_counter
        if id in self.added_articles:
            return
        self.added_articles.append(id)
        published = time.gmtime(item.get('timestamp', time.time()))
        title = item.get('title', _('Untitled article'))
        link = item.get('url', None)
        description = item.get('description', '')
        content = item.get('content', '')
        author = item.get('author', '')
        article = Article(id, title, link, author, description, published, content)
        delta = utcnow() - article.utctime
        if delta.days * 24 * 3600 + delta.seconds <= 24 * 3600 * self.oldest_article:
            self.articles.append(article)
        else:
            t = strftime('%a, %d %b, %Y %H:%M', article.localtime.timetuple())
            self.logger.debug(
                'Skipping article %s (%s) from feed %s as it is too old.' % (title, t, self.title))
        d = item.get('date', '')
        article.formatted_date = d
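# Hedged illustration (not calibre code): both populate_from_preparsed_feed()
# variants above keep an article only if it is at most `oldest_article` days old,
# comparing delta.days*24*3600 + delta.seconds against the cutoff in seconds.
# Standalone stdlib sketch; 'is_recent_enough' is a made-up name.
from datetime import datetime, timedelta, timezone

def is_recent_enough(published_utc, oldest_article_days=7):
    delta = datetime.now(timezone.utc) - published_utc
    return delta.days * 24 * 3600 + delta.seconds <= 24 * 3600 * oldest_article_days

print(is_recent_enough(datetime.now(timezone.utc) - timedelta(days=3)))   # True
print(is_recent_enough(datetime.now(timezone.utc) - timedelta(days=10)))  # False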
def data(self, index, role):
    try:
        if role not in (Qt.ItemDataRole.DisplayRole, Qt.ItemDataRole.DecorationRole):
            return None
        row, col = index.row(), index.column()
        job = self.jobs[row]
        if role == Qt.ItemDataRole.DisplayRole:
            if col == 0:
                desc = job.description
                if not desc:
                    desc = _('Unknown job')
                return (desc)
            if col == 1:
                return (job.status_text)
            if col == 2:
                p = 100. if job.is_finished else job.percent
                return (p)
            if col == 3:
                rtime = job.running_time
                if rtime is None:
                    return None
                return human_readable_interval(rtime)
            if col == 4 and job.start_time is not None:
                return (strftime('%H:%M -- %d %b', time.localtime(job.start_time)))
        if role == Qt.ItemDataRole.DecorationRole and col == 0:
            state = job.run_state
            if state == job.WAITING:
                return self.wait_icon
            if state == job.RUNNING:
                return self.running_icon
            if job.killed or job.failed:
                return self.error_icon
            return self.done_icon
    except:
        import traceback
        traceback.print_exc()
    return None
def handle_article(div): a = div.find('a', href=True) if not a: return url = re.sub(r'\?.*', '', a['href']) if not url.startswith("http"): return if not url.endswith(".html"): return if 'podcast' in url: return if '/video/' in url: return url += '?pagewanted=all' if url in url_list: return url_list.append(url) title = self.tag_to_string(a, use_alt=True).strip() description = '' pubdate = strftime('%a, %d %b') summary = div.find(True, attrs={'class':'summary'}) if summary: description = self.tag_to_string(summary, use_alt=False) author = '' authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) else: authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) feed = key if key is not None else 'Uncategorized' if not articles.has_key(feed): ans.append(feed) articles[feed] = [] articles[feed].append( dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
def _generate(self, title, masthead, datefmt, feeds, extra_css=None, style=None):
    self.IS_HTML = False
    date = strftime(datefmt)
    head = HEAD(TITLE(title))
    if style:
        head.append(STYLE(style, type='text/css'))
    if extra_css:
        head.append(STYLE(extra_css, type='text/css'))
    ul = UL(CLASS('calibre_feed_list'))
    for i, feed in enumerate(feeds):
        if feed:
            li = LI(A(feed.title, CLASS('feed', 'calibre_rescale_120',
                href='feed_%d/index.html' % i)), id='feed_%d' % i)
            ul.append(li)
    div = DIV(
        PT(IMG(src=masthead, alt="masthead"), style='text-align:center'),
        PT(date, style='text-align:right'),
        ul,
        CLASS('calibre_rescale_100'))
    self.root = HTML(head, BODY(div))
    if self.html_lang:
        self.root.set('lang', self.html_lang)
def data(self, index, role):
    try:
        if role not in (Qt.DisplayRole, Qt.DecorationRole):
            return None
        row, col = index.row(), index.column()
        job = self.jobs[row]
        if role == Qt.DisplayRole:
            if col == 0:
                desc = job.description
                if not desc:
                    desc = _('Unknown job')
                return (desc)
            if col == 1:
                return (job.status_text)
            if col == 2:
                p = 100. if job.is_finished else job.percent
                return (p)
            if col == 3:
                rtime = job.running_time
                if rtime is None:
                    return None
                return human_readable_interval(rtime)
            if col == 4 and job.start_time is not None:
                return (strftime(u'%H:%M -- %d %b', time.localtime(job.start_time)))
        if role == Qt.DecorationRole and col == 0:
            state = job.run_state
            if state == job.WAITING:
                return self.wait_icon
            if state == job.RUNNING:
                return self.running_icon
            if job.killed or job.failed:
                return self.error_icon
            return self.done_icon
    except:
        import traceback
        traceback.print_exc()
    return None
def test_winutil(self): from calibre.constants import plugins from calibre import strftime winutil = plugins['winutil'][0] def au(x, name): self.assertTrue( isinstance(x, unicode_type), '%s() did not return a unicode string, instead returning: %r' % (name, x)) for x in winutil.argv(): au(x, 'argv') for x in 'username temp_path locale_name'.split(): au(getattr(winutil, x)(), x) d = winutil.localeconv() au(d['thousands_sep'], 'localeconv') au(d['decimal_point'], 'localeconv') for k, v in iteritems(d): au(v, k) os.environ['XXXTEST'] = 'YYY' self.assertEqual(getenv('XXXTEST'), 'YYY') del os.environ['XXXTEST'] self.assertIsNone(getenv('XXXTEST')) for k in os.environ: v = getenv(k) if v is not None: au(v, 'getenv-' + k) t = time.localtime() fmt = '%Y%a%b%e%H%M' for fmt in (fmt, fmt.encode('ascii')): x = strftime(fmt, t) au(x, 'strftime') if isinstance(fmt, bytes): fmt = fmt.decode('ascii') self.assertEqual( unicode_type(time.strftime(fmt.replace('%e', '%#d'), t)), x)
def handle_article(self,div): thumbnail = div.find('div','thumbnail') if thumbnail: thumbnail.extract() a = div.find('a', href=True) if not a: return url = re.sub(r'\?.*', '', a['href']) if self.exclude_url(url): return url += '?pagewanted=all' if self.filterDuplicates: if url in self.url_list: return self.url_list.append(url) title = self.tag_to_string(a, use_alt=True).strip() description = '' pubdate = strftime('%a, %d %b') summary = div.find(True, attrs={'class':'summary'}) if summary: description = self.tag_to_string(summary, use_alt=False) author = '' authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) else: authorAttribution = div.find(True, attrs={'class':'byline'}) if authorAttribution: author = self.tag_to_string(authorAttribution, use_alt=False) feed = self.key if self.key is not None else 'Uncategorized' if not self.articles.has_key(feed): self.ans.append(feed) self.articles[feed] = [] self.articles[feed].append( dict(title=title, url=url, date=pubdate, description=description, author=author, content=''))
def run(self, path_to_output, opts, db, notification=DummyReporter()): from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder from calibre.utils.logging import default_log as log from calibre.utils.config import JSONConfig # If preset specified from the cli, insert stored options from JSON file if hasattr(opts, 'preset') and opts.preset: available_presets = JSONConfig("catalog_presets") if opts.preset not in available_presets: if available_presets: print(_('Error: Preset "%s" not found.' % opts.preset)) print(_('Stored presets: %s' % ', '.join([p for p in sorted(available_presets.keys())]))) else: print(_('Error: No stored presets.')) return 1 # Copy the relevant preset values to the opts object for item in available_presets[opts.preset]: if item not in ['exclusion_rules_tw', 'format', 'prefix_rules_tw']: setattr(opts, item, available_presets[opts.preset][item]) # Provide an unconnected device opts.connected_device = { 'is_device_connected': False, 'kind': None, 'name': None, 'save_template': None, 'serial': None, 'storage': None, } # Convert prefix_rules and exclusion_rules from JSON lists to tuples prs = [] for rule in opts.prefix_rules: prs.append(tuple(rule)) opts.prefix_rules = tuple(prs) ers = [] for rule in opts.exclusion_rules: ers.append(tuple(rule)) opts.exclusion_rules = tuple(ers) opts.log = log opts.fmt = self.fmt = path_to_output.rpartition('.')[2] # Add local options opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y')) opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d')) opts.connected_kindle = False # Finalize output_profile op = opts.output_profile if op is None: op = 'default' if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower(): opts.connected_kindle = True if opts.connected_device['serial'] and \ opts.connected_device['serial'][:4] in ['B004', 'B005']: op = "kindle_dx" else: op = "kindle" opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100 opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60 opts.output_profile = op opts.basename = "Catalog" opts.cli_environment = not hasattr(opts, 'sync') # Hard-wired to always sort descriptions by author, with series after non-series opts.sort_descriptions_by_author = True build_log = [] build_log.append(u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" % (self.name, current_library_name(), self.fmt, 'for %s ' % opts.output_profile if opts.output_profile else '', 'CLI' if opts.cli_environment else 'GUI', calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False)) ) # If exclude_genre is blank, assume user wants all tags as genres if opts.exclude_genre.strip() == '': # opts.exclude_genre = '\[^.\]' # build_log.append(" converting empty exclude_genre to '\[^.\]'") opts.exclude_genre = 'a^' build_log.append(" converting empty exclude_genre to 'a^'") if opts.connected_device['is_device_connected'] and \ opts.connected_device['kind'] == 'device': if opts.connected_device['serial']: build_log.append(u" connected_device: '%s' #%s%s " % (opts.connected_device['name'], opts.connected_device['serial'][0:4], 'x' * (len(opts.connected_device['serial']) - 4))) for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) try: for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % 
storage) except: build_log.append(u" (no mount points)") else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) opts_dict = vars(opts) if opts_dict['ids']: build_log.append(" book count: %d" % len(opts_dict['ids'])) sections_list = [] if opts.generate_authors: sections_list.append('Authors') if opts.generate_titles: sections_list.append('Titles') if opts.generate_series: sections_list.append('Series') if opts.generate_genres: sections_list.append('Genres') if opts.generate_recently_added: sections_list.append('Recently Added') if opts.generate_descriptions: sections_list.append('Descriptions') if not sections_list: if opts.cli_environment: opts.log.warn('*** No Section switches specified, enabling all Sections ***') opts.generate_authors = True opts.generate_titles = True opts.generate_series = True opts.generate_genres = True opts.generate_recently_added = True opts.generate_descriptions = True sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions'] else: opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***') return ["No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"] if opts.fmt == 'mobi' and sections_list == ['Descriptions']: warning = _("\n*** Adding 'By authors' section required for MOBI output ***") opts.log.warn(warning) sections_list.insert(0, 'Authors') opts.generate_authors = True opts.log(u" Sections: %s" % ', '.join(sections_list)) opts.section_list = sections_list # Limit thumb_width to 1.0" - 2.0" try: if float(opts.thumb_width) < float(self.THUMB_SMALLEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = self.THUMB_SMALLEST if float(opts.thumb_width) > float(self.THUMB_LARGEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST)) opts.thumb_width = self.THUMB_LARGEST opts.thumb_width = "%.2f" % float(opts.thumb_width) except: log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = "1.0" # eval prefix_rules if passed from command line if type(opts.prefix_rules) is not tuple: try: opts.prefix_rules = eval(opts.prefix_rules) except: log.error("malformed --prefix-rules: %s" % opts.prefix_rules) raise for rule in opts.prefix_rules: if len(rule) != 4: log.error("incorrect number of args for --prefix-rules: %s" % repr(rule)) # eval exclusion_rules if passed from command line if type(opts.exclusion_rules) is not tuple: try: opts.exclusion_rules = eval(opts.exclusion_rules) except: log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules) raise for rule in opts.exclusion_rules: if len(rule) != 3: log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule)) # Display opts keys = sorted(opts_dict.keys()) build_log.append(" opts:") for key in keys: if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator', 'cross_reference_authors', 'description_clip', 'exclude_book_marker', 'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt', 'genre_source_field', 'header_note_source_field', 'merge_comments_rule', 'output_profile', 'prefix_rules', 'preset', 'read_book_marker', 'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync', 'thumb_width', 'use_existing_cover', 'wishlist_tag']: build_log.append(" %s: %s" % (key, repr(opts_dict[key]))) if opts.verbose: log('\n'.join(line for line in build_log)) # Capture start_time opts.start_time = time.time() 
self.opts = opts if opts.verbose: log.info(" Begin catalog source generation (%s)" % str(datetime.timedelta(seconds=int(time.time() - opts.start_time)))) # Launch the Catalog builder catalog = CatalogBuilder(db, opts, self, report_progress=notification) try: catalog.build_sources() if opts.verbose: log.info(" Completed catalog source generation (%s)\n" % str(datetime.timedelta(seconds=int(time.time() - opts.start_time)))) except (AuthorSortMismatchException, EmptyCatalogException) as e: log.error(" *** Terminated catalog generation: %s ***" % e) except: log.error(" unhandled exception in catalog generator") raise else: recommendations = [] recommendations.append(('remove_fake_margins', False, OptionRecommendation.HIGH)) recommendations.append(('comments', '', OptionRecommendation.HIGH)) """ >>> Use to debug generated catalog code before pipeline conversion <<< """ GENERATE_DEBUG_EPUB = False if GENERATE_DEBUG_EPUB: catalog_debug_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Catalog debug') setattr(opts, 'debug_pipeline', os.path.expanduser(catalog_debug_path)) dp = getattr(opts, 'debug_pipeline', None) if dp is not None: recommendations.append(('debug_pipeline', dp, OptionRecommendation.HIGH)) if opts.output_profile and opts.output_profile.startswith("kindle"): recommendations.append(('output_profile', opts.output_profile, OptionRecommendation.HIGH)) recommendations.append(('book_producer', opts.output_profile, OptionRecommendation.HIGH)) if opts.fmt == 'mobi': recommendations.append(('no_inline_toc', True, OptionRecommendation.HIGH)) recommendations.append(('verbose', 2, OptionRecommendation.HIGH)) # Use existing cover or generate new cover cpath = None existing_cover = False try: search_text = 'title:"%s" author:%s' % ( opts.catalog_title.replace('"', '\\"'), 'calibre') matches = db.search(search_text, return_matches=True, sort_results=False) if matches: cpath = db.cover(matches[0], index_is_id=True, as_path=True) if cpath and os.path.exists(cpath): existing_cover = True except: pass if self.opts.use_existing_cover and not existing_cover: log.warning("no existing catalog cover found") if self.opts.use_existing_cover and existing_cover: recommendations.append(('cover', cpath, OptionRecommendation.HIGH)) log.info("using existing catalog cover") else: from calibre.ebooks.covers import calibre_cover2 log.info("replacing catalog cover") new_cover_path = PersistentTemporaryFile(suffix='.jpg') new_cover = calibre_cover2(opts.catalog_title, 'calibre') new_cover_path.write(new_cover) new_cover_path.close() recommendations.append(('cover', new_cover_path.name, OptionRecommendation.HIGH)) # Run ebook-convert from calibre.ebooks.conversion.plumber import Plumber plumber = Plumber(os.path.join(catalog.catalog_path, opts.basename + '.opf'), path_to_output, log, report_progress=notification, abort_after_input_dump=False) plumber.merge_ui_recommendations(recommendations) plumber.run() try: os.remove(cpath) except: pass if GENERATE_DEBUG_EPUB: from calibre.ebooks.epub import initialize_container from calibre.ebooks.tweak import zip_rebuilder from calibre.utils.zipfile import ZipFile input_path = os.path.join(catalog_debug_path, 'input') epub_shell = os.path.join(catalog_debug_path, 'epub_shell.zip') initialize_container(epub_shell, opf_name='content.opf') with ZipFile(epub_shell, 'r') as zf: zf.extractall(path=input_path) os.remove(epub_shell) zip_rebuilder(input_path, os.path.join(catalog_debug_path, 'input.epub')) if opts.verbose: log.info(" Catalog creation complete (%s)\n" % 
str(datetime.timedelta(seconds=int(time.time() - opts.start_time)))) # returns to gui2.actions.catalog:catalog_generated() return catalog.error
def test_winutil(self): import tempfile from calibre.constants import plugins from calibre import strftime winutil = plugins['winutil'][0] def au(x, name): self.assertTrue( isinstance(x, unicode_type), '%s() did not return a unicode string, instead returning: %r' % (name, x)) for x in 'username temp_path locale_name'.split(): au(getattr(winutil, x)(), x) d = winutil.localeconv() au(d['thousands_sep'], 'localeconv') au(d['decimal_point'], 'localeconv') for k, v in iteritems(d): au(v, k) os.environ['XXXTEST'] = 'YYY' self.assertEqual(getenv('XXXTEST'), 'YYY') del os.environ['XXXTEST'] self.assertIsNone(getenv('XXXTEST')) for k in os.environ: v = getenv(k) if v is not None: au(v, 'getenv-' + k) t = time.localtime() fmt = '%Y%a%b%e%H%M' for fmt in (fmt, fmt.encode('ascii')): x = strftime(fmt, t) au(x, 'strftime') tdir = winutil.temp_path() path = os.path.join(tdir, 'test-create-file.txt') h = winutil.create_file( path, winutil.GENERIC_READ | winutil.GENERIC_WRITE, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) winutil.close_handle(h) self.assertRaises(OSError, winutil.close_handle, h) winutil.delete_file(path) self.assertRaises(OSError, winutil.delete_file, path) self.assertRaises(OSError, winutil.create_file, os.path.join(path, 'cannot'), winutil.GENERIC_READ, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) sz = 23 data = os.urandom(sz) open(path, 'wb').write(data) h = winutil.create_file( path, winutil.GENERIC_READ | winutil.GENERIC_WRITE, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) self.assertEqual(winutil.get_file_size(h), sz) self.assertRaises(OSError, winutil.set_file_pointer, h, 23, 23) self.assertEqual(winutil.read_file(h), data) self.assertEqual(winutil.read_file(h), b'') winutil.set_file_pointer(h, 3) self.assertEqual(winutil.read_file(h), data[3:]) winutil.close_handle(h) self.assertEqual(winutil.nlinks(path), 1) npath = path + '.2' winutil.create_hard_link(npath, path) self.assertEqual(open(npath, 'rb').read(), data) self.assertEqual(winutil.nlinks(path), 2) winutil.delete_file(path) self.assertEqual(winutil.nlinks(npath), 1) winutil.set_file_attributes(npath, winutil.FILE_ATTRIBUTE_READONLY) self.assertRaises(OSError, winutil.delete_file, npath) winutil.set_file_attributes(npath, winutil.FILE_ATTRIBUTE_NORMAL) winutil.delete_file(npath) self.assertGreater(min(winutil.get_disk_free_space(None)), 0) open(path, 'wb').close() open(npath, 'wb').close() winutil.move_file(path, npath, winutil.MOVEFILE_WRITE_THROUGH | winutil.MOVEFILE_REPLACE_EXISTING) self.assertFalse(os.path.exists(path)) os.remove(npath) dpath = tempfile.mkdtemp(dir=os.path.dirname(path)) dh = winutil.create_file( dpath, winutil.FILE_LIST_DIRECTORY, winutil.FILE_SHARE_READ, winutil.OPEN_EXISTING, winutil.FILE_FLAG_BACKUP_SEMANTICS, ) from threading import Thread events = [] def read_changes(): buffer = b'0' * 8192 events.extend(winutil.read_directory_changes( dh, buffer, True, winutil.FILE_NOTIFY_CHANGE_FILE_NAME | winutil.FILE_NOTIFY_CHANGE_DIR_NAME | winutil.FILE_NOTIFY_CHANGE_ATTRIBUTES | winutil.FILE_NOTIFY_CHANGE_SIZE | winutil.FILE_NOTIFY_CHANGE_LAST_WRITE | winutil.FILE_NOTIFY_CHANGE_SECURITY )) t = Thread(target=read_changes, daemon=True) t.start() testp = os.path.join(dpath, 'test') open(testp, 'w').close() t.join(2) self.assertTrue(events) for actions, path in events: self.assertEqual(os.path.join(dpath, path), testp) winutil.close_handle(dh) os.remove(testp) os.rmdir(dpath)
def get_components(template, mi, id, timefmt='%b %Y', length=250, sanitize_func=ascii_filename, replace_whitespace=False, to_lowercase=False, safe_format=True, last_has_extension=True, single_dir=False): tsorder = tweaks['save_template_title_series_sorting'] format_args = FORMAT_ARGS.copy() format_args.update(mi.all_non_none_fields()) if mi.title: if tsorder == 'strictly_alphabetic': v = mi.title else: # title_sort might be missing or empty. Check both conditions v = mi.get('title_sort', None) if not v: v = title_sort(mi.title, order=tsorder) format_args['title'] = v if mi.authors: format_args['authors'] = mi.format_authors() format_args['author'] = format_args['authors'] if mi.tags: format_args['tags'] = mi.format_tags() if format_args['tags'].startswith('/'): format_args['tags'] = format_args['tags'][1:] else: format_args['tags'] = '' if mi.series: format_args['series'] = title_sort(mi.series, order=tsorder) if mi.series_index is not None: format_args['series_index'] = mi.format_series_index() else: template = re.sub(r'\{series_index[^}]*?\}', '', template) if mi.rating is not None: format_args['rating'] = mi.format_rating(divide_by=2.0) if mi.identifiers: format_args['identifiers'] = mi.format_field_extended('identifiers')[1] else: format_args['identifiers'] = '' if hasattr(mi.timestamp, 'timetuple'): format_args['timestamp'] = strftime(timefmt, mi.timestamp.timetuple()) if hasattr(mi.pubdate, 'timetuple'): format_args['pubdate'] = strftime(timefmt, mi.pubdate.timetuple()) if hasattr(mi, 'last_modified') and hasattr(mi.last_modified, 'timetuple'): format_args['last_modified'] = strftime(timefmt, mi.last_modified.timetuple()) format_args['id'] = unicode_type(id) # Now format the custom fields custom_metadata = mi.get_all_user_metadata(make_copy=False) for key in custom_metadata: if key in format_args: cm = custom_metadata[key] if cm['datatype'] == 'series': format_args[key] = title_sort(format_args[key], order=tsorder) if key+'_index' in format_args: format_args[key+'_index'] = fmt_sidx(format_args[key+'_index']) elif cm['datatype'] == 'datetime': format_args[key] = strftime(timefmt, as_local_time(format_args[key]).timetuple()) elif cm['datatype'] == 'bool': format_args[key] = _('yes') if format_args[key] else _('no') elif cm['datatype'] == 'rating': format_args[key] = mi.format_rating(format_args[key], divide_by=2.0) elif cm['datatype'] in ['int', 'float']: if format_args[key] != 0: format_args[key] = unicode_type(format_args[key]) else: format_args[key] = '' if safe_format: components = Formatter().safe_format(template, format_args, 'G_C-EXCEPTION!', mi) else: components = Formatter().unsafe_format(template, format_args, mi) components = [x.strip() for x in components.split('/')] components = [sanitize_func(x) for x in components if x] if not components: components = [unicode_type(id)] if to_lowercase: components = [x.lower() for x in components] if replace_whitespace: components = [re.sub(r'\s', '_', x) for x in components] if single_dir: components = components[-1:] return shorten_components_to(length, components, last_has_extension=last_has_extension)
def _set_pubdate(publish_info, mi, ctx):
    if mi.is_null('pubdate'):
        return
    ctx.clear_meta_tags(publish_info, 'year')
    tag = ctx.create_tag(publish_info, 'year')
    tag.text = strftime('%Y', mi.pubdate)
def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher=(''), rescale_fonts=False): css = P('jacket/stylesheet.css', data=True).decode('utf-8') template = P('jacket/template.xhtml', data=True).decode('utf-8') template = re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL) css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL) try: title_str = mi.title if mi.title else alt_title except: title_str = _('Unknown') title_str = escape(title_str) title = '<span class="title">%s</span>' % title_str series = Series(mi.series, mi.series_index) try: publisher = mi.publisher if mi.publisher else alt_publisher except: publisher = '' publisher = escape(publisher) try: if is_date_undefined(mi.pubdate): pubdate = '' else: dt = as_local_time(mi.pubdate) pubdate = strftime(u'%Y', dt.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = Tags((mi.tags if mi.tags else alt_tags), output_profile) comments = mi.comments if mi.comments else alt_comments comments = comments.strip() orig_comments = comments if comments: comments = comments_to_html(comments) try: author = mi.format_authors() except: author = '' author = escape(author) def generate_html(comments): args = dict( xmlns=XHTML_NS, title_str=title_str, css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=_('Series'), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='', searchable_tags=' '.join( escape(t) + 'ttt' for t in tags.tags_list), ) for key in mi.custom_field_keys(): m = mi.get_user_metadata(key, False) or {} try: display_name, val = mi.format_field_extended(key)[:2] dkey = key.replace('#', '_') dt = m.get('datatype') if dt == 'series': args[dkey] = Series(mi.get(key), mi.get(key + '_index')) elif dt == 'rating': args[dkey] = rating_to_stars( mi.get(key), m.get('display', {}).get('allow_half_stars', False)) else: args[dkey] = escape(val) args[dkey + '_label'] = escape(display_name) except Exception: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" %s: %s" % ('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') formatter = SafeFormatter() generated_html = formatter.format(template, **args) # Post-process the generated html to strip out empty header items soup = BeautifulSoup(generated_html) if not series: series_tag = soup.find(attrs={'class': 'cbj_series'}) if series_tag is not None: series_tag.extract() if not rating: rating_tag = soup.find(attrs={'class': 'cbj_rating'}) if rating_tag is not None: rating_tag.extract() if not tags: tags_tag = soup.find(attrs={'class': 'cbj_tags'}) if tags_tag is not None: tags_tag.extract() if not pubdate: pubdate_tag = soup.find(attrs={'class': 'cbj_pubdata'}) if pubdate_tag is not None: pubdate_tag.extract() if output_profile.short_name != 'kindle': hr_tag = soup.find('hr', attrs={'class': 'cbj_kindle_banner_hr'}) if hr_tag is not None: hr_tag.extract() return strip_encoding_declarations( soup.renderContents('utf-8').decode('utf-8')) from calibre.ebooks.oeb.base import 
RECOVER_PARSER try: root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER) except: try: root = etree.fromstring(generate_html(escape(orig_comments)), parser=RECOVER_PARSER) except: root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER) if rescale_fonts: # We ensure that the conversion pipeline will set the font sizes for # text in the jacket to the same size as the font sizes for the rest of # the text in the book. That means that as long as the jacket uses # relative font sizes (em or %), the post conversion font size will be # the same as for text in the main book. So text with size x em will # be rescaled to the same value in both the jacket and the main content. # # We cannot use calibre_rescale_100 on the body tag as that will just # give the body tag a font size of 1em, which is useless. for body in root.xpath('//*[local-name()="body"]'): fw = body.makeelement(XHTML('div')) fw.set('class', 'calibre_rescale_100') for child in body: fw.append(child) body.append(fw) from calibre.ebooks.oeb.polish.pretty import pretty_html_tree pretty_html_tree(None, root) return root
def run(self, path_to_output, opts, db, notification=DummyReporter()): from calibre.utils.date import isoformat from calibre.utils.html2text import html2text from calibre.utils.bibtex import BibTeX from calibre.library.save_to_disk import preprocess_template from calibre.utils.logging import default_log as log from calibre.utils.filenames import ascii_text library_name = os.path.basename(db.library_path) def create_bibtex_entry(entry, fields, mode, template_citation, bibtexdict, db, citation_bibtex=True, calibre_files=True): # Bibtex doesn't like UTF-8 but keep unicode until writing # Define starting chain or if book valid strict and not book return a Fail string bibtex_entry = [] if mode != "misc" and check_entry_book_valid(entry): bibtex_entry.append('@book{') elif mode != "book": bibtex_entry.append('@misc{') else: # case strict book return '' if citation_bibtex: # Citation tag bibtex_entry.append( make_bibtex_citation(entry, template_citation, bibtexdict)) bibtex_entry = [' '.join(bibtex_entry)] for field in fields: if field.startswith('#'): item = db.get_field(entry['id'], field, index_is_id=True) if isinstance(item, (bool, numbers.Number)): item = repr(item) elif field == 'title_sort': item = entry['sort'] elif field == 'library_name': item = library_name else: item = entry[field] # check if the field should be included (none or empty) if item is None: continue try: if len(item) == 0: continue except TypeError: pass if field == 'authors': bibtex_entry.append('author = "%s"' % bibtexdict.bibtex_author_format(item)) elif field == 'id': bibtex_entry.append('calibreid = "%s"' % int(item)) elif field == 'rating': bibtex_entry.append('rating = "%s"' % int(item)) elif field == 'size': bibtex_entry.append('%s = "%s octets"' % (field, int(item))) elif field == 'tags': # A list to flatten bibtex_entry.append( 'tags = "%s"' % bibtexdict.utf8ToBibtex(', '.join(item))) elif field == 'comments': # \n removal item = item.replace('\r\n', ' ') item = item.replace('\n', ' ') # unmatched brace removal (users should use \leftbrace or \rightbrace for single braces) item = bibtexdict.stripUnmatchedSyntax(item, '{', '}') # html to text try: item = html2text(item) except: log.warn("Failed to convert comments to text") bibtex_entry.append('note = "%s"' % bibtexdict.utf8ToBibtex(item)) elif field == 'isbn': # Could be 9, 10 or 13 digits bibtex_entry.append('isbn = "%s"' % format_isbn(item)) elif field == 'formats': # Add file path if format is selected formats = [ format.rpartition('.')[2].lower() for format in item ] bibtex_entry.append('formats = "%s"' % ', '.join(formats)) if calibre_files: files = [ ':%s:%s' % (format, format.rpartition('.')[2].upper()) for format in item ] bibtex_entry.append('file = "%s"' % ', '.join(files)) elif field == 'series_index': bibtex_entry.append('volume = "%s"' % int(item)) elif field == 'timestamp': bibtex_entry.append('timestamp = "%s"' % isoformat(item).partition('T')[0]) elif field == 'pubdate': bibtex_entry.append('year = "%s"' % item.year) bibtex_entry.append( 'month = "%s"' % bibtexdict.utf8ToBibtex(strftime("%b", item))) elif field.startswith('#') and isinstance( item, string_or_bytes): bibtex_entry.append( 'custom_%s = "%s"' % (field[1:], bibtexdict.utf8ToBibtex(item))) elif isinstance(item, string_or_bytes): # elif field in ['title', 'publisher', 'cover', 'uuid', 'ondevice', # 'author_sort', 'series', 'title_sort'] : bibtex_entry.append('%s = "%s"' % (field, bibtexdict.utf8ToBibtex(item))) bibtex_entry = ',\n '.join(bibtex_entry) bibtex_entry += ' }\n\n' return 
bibtex_entry def check_entry_book_valid(entry): # Check that the required fields are ok for a book entry for field in ['title', 'authors', 'publisher']: if entry[field] is None or len(entry[field]) == 0: return False if entry['pubdate'] is None: return False else: return True def make_bibtex_citation(entry, template_citation, bibtexclass): # define a function to replace the template entry by its value def tpl_replace(objtplname): tpl_field = re.sub(r'[\{\}]', '', objtplname.group()) if tpl_field in TEMPLATE_ALLOWED_FIELDS: if tpl_field in ['pubdate', 'timestamp']: tpl_field = isoformat( entry[tpl_field]).partition('T')[0] elif tpl_field in ['tags', 'authors']: tpl_field = entry[tpl_field][0] elif tpl_field in ['id', 'series_index']: tpl_field = unicode_type(entry[tpl_field]) else: tpl_field = entry[tpl_field] return ascii_text(tpl_field) else: return '' if len(template_citation) > 0: tpl_citation = bibtexclass.utf8ToBibtex( bibtexclass.ValidateCitationKey( re.sub(r'\{[^{}]*\}', tpl_replace, template_citation))) if len(tpl_citation) > 0: return tpl_citation if len(entry["isbn"]) > 0: template_citation = '%s' % re.sub(r'[\D]', '', entry["isbn"]) else: template_citation = '%s' % unicode_type(entry["id"]) return bibtexclass.ValidateCitationKey(template_citation) self.fmt = path_to_output.rpartition('.')[2] self.notification = notification # Combobox options bibfile_enc = ['utf8', 'cp1252', 'ascii'] bibfile_enctag = ['strict', 'replace', 'ignore', 'backslashreplace'] bib_entry = ['mixed', 'misc', 'book'] # Needed beacause CLI return str vs int by widget try: bibfile_enc = bibfile_enc[opts.bibfile_enc] bibfile_enctag = bibfile_enctag[opts.bibfile_enctag] bib_entry = bib_entry[opts.bib_entry] except: if opts.bibfile_enc in bibfile_enc: bibfile_enc = opts.bibfile_enc else: log.warn("Incorrect --choose-encoding flag, revert to default") bibfile_enc = bibfile_enc[0] if opts.bibfile_enctag in bibfile_enctag: bibfile_enctag = opts.bibfile_enctag else: log.warn( "Incorrect --choose-encoding-configuration flag, revert to default" ) bibfile_enctag = bibfile_enctag[0] if opts.bib_entry in bib_entry: bib_entry = opts.bib_entry else: log.warn("Incorrect --entry-type flag, revert to default") bib_entry = bib_entry[0] if opts.verbose: opts_dict = vars(opts) log("%s(): Generating %s" % (self.name, self.fmt)) if opts.connected_device['is_device_connected']: log(" connected_device: %s" % opts.connected_device['name']) if opts_dict['search_text']: log(" --search='%s'" % opts_dict['search_text']) if opts_dict['ids']: log(" Book count: %d" % len(opts_dict['ids'])) if opts_dict['search_text']: log(" (--search ignored when a subset of the database is specified)" ) if opts_dict['fields']: if opts_dict['fields'] == 'all': log(" Fields: %s" % ', '.join(FIELDS[1:])) else: log(" Fields: %s" % opts_dict['fields']) log(" Output file will be encoded in %s with %s flag" % (bibfile_enc, bibfile_enctag)) log(" BibTeX entry type is %s with a citation like '%s' flag" % (bib_entry, opts_dict['bib_cit'])) # If a list of ids are provided, don't use search_text if opts.ids: opts.search_text = None data = self.search_sort_db(db, opts) if not len(data): log.error( "\nNo matching database entries for search criteria '%s'" % opts.search_text) # Get the requested output fields as a list fields = self.get_output_fields(db, opts) if not len(data): log.error( "\nNo matching database entries for search criteria '%s'" % opts.search_text) # Initialize BibTeX class bibtexc = BibTeX() # Entries writing after Bibtex formating (or not) if 
bibfile_enc != 'ascii': bibtexc.ascii_bibtex = False else: bibtexc.ascii_bibtex = True # Check citation choice and go to default in case of bad CLI if isinstance(opts.impcit, string_or_bytes): if opts.impcit == 'False': citation_bibtex = False elif opts.impcit == 'True': citation_bibtex = True else: log.warn("Incorrect --create-citation, revert to default") citation_bibtex = True else: citation_bibtex = opts.impcit # Check add file entry and go to default in case of bad CLI if isinstance(opts.addfiles, string_or_bytes): if opts.addfiles == 'False': addfiles_bibtex = False elif opts.addfiles == 'True': addfiles_bibtex = True else: log.warn("Incorrect --add-files-path, revert to default") addfiles_bibtex = True else: addfiles_bibtex = opts.addfiles # Preprocess for error and light correction template_citation = preprocess_template(opts.bib_cit) # Open output and write entries with codecs.open(path_to_output, 'w', bibfile_enc, bibfile_enctag)\ as outfile: # File header nb_entries = len(data) # check in book strict if all is ok else throw a warning into log if bib_entry == 'book': nb_books = len(list(filter(check_entry_book_valid, data))) if nb_books < nb_entries: log.warn("Only %d entries in %d are book compatible" % (nb_books, nb_entries)) nb_entries = nb_books # If connected device, add 'On Device' values to data if opts.connected_device[ 'is_device_connected'] and 'ondevice' in fields: for entry in data: entry[ 'ondevice'] = db.catalog_plugin_on_device_temp_mapping[ entry['id']]['ondevice'] # outfile.write('%%%Calibre catalog\n%%%{0} entries in catalog\n\n'.format(nb_entries)) outfile.write( '@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n' % (nb_entries, strftime("%A, %d. %B %Y %H:%M"))) for entry in data: outfile.write( create_bibtex_entry(entry, fields, bib_entry, template_citation, bibtexc, db, citation_bibtex, addfiles_bibtex))
def fget(self):
    if self._formatted_date is None:
        self._formatted_date = strftime(" [%a, %d %b %H:%M]", t=self.localtime.timetuple())
    return self._formatted_date
def test_winutil(self): import tempfile from calibre import strftime from calibre_extensions import winutil self.assertEqual(winutil.special_folder_path(winutil.CSIDL_APPDATA), winutil.known_folder_path(winutil.FOLDERID_RoamingAppData)) self.assertEqual(winutil.special_folder_path(winutil.CSIDL_LOCAL_APPDATA), winutil.known_folder_path(winutil.FOLDERID_LocalAppData)) self.assertEqual(winutil.special_folder_path(winutil.CSIDL_FONTS), winutil.known_folder_path(winutil.FOLDERID_Fonts)) self.assertEqual(winutil.special_folder_path(winutil.CSIDL_PROFILE), winutil.known_folder_path(winutil.FOLDERID_Profile)) def au(x, name): self.assertTrue( isinstance(x, unicode_type), '%s() did not return a unicode string, instead returning: %r' % (name, x)) for x in 'username temp_path locale_name'.split(): au(getattr(winutil, x)(), x) d = winutil.localeconv() au(d['thousands_sep'], 'localeconv') au(d['decimal_point'], 'localeconv') for k, v in iteritems(d): au(v, k) os.environ['XXXTEST'] = 'YYY' self.assertEqual(getenv('XXXTEST'), 'YYY') del os.environ['XXXTEST'] self.assertIsNone(getenv('XXXTEST')) for k in os.environ: v = getenv(k) if v is not None: au(v, 'getenv-' + k) t = time.localtime() fmt = '%Y%a%b%e%H%M' for fmt in (fmt, fmt.encode('ascii')): x = strftime(fmt, t) au(x, 'strftime') tdir = tempfile.mkdtemp(dir=winutil.temp_path()) path = os.path.join(tdir, 'test-create-file.txt') h = winutil.create_file( path, winutil.GENERIC_READ | winutil.GENERIC_WRITE, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) self.assertRaises(OSError, winutil.delete_file, path) del h winutil.delete_file(path) self.assertRaises(OSError, winutil.delete_file, path) self.assertRaises(OSError, winutil.create_file, os.path.join(path, 'cannot'), winutil.GENERIC_READ, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) self.assertTrue(winutil.supports_hardlinks(os.path.abspath(os.getcwd())[0] + ':\\')) sz = 23 data = os.urandom(sz) open(path, 'wb').write(data) h = winutil.Handle(0, winutil.ModuleHandle, 'moo') r = repr(h) h2 = winutil.Handle(h.detach(), winutil.ModuleHandle, 'moo') self.assertEqual(r, repr(h2)) h2.close() h = winutil.create_file( path, winutil.GENERIC_READ | winutil.GENERIC_WRITE, 0, winutil.OPEN_ALWAYS, winutil.FILE_ATTRIBUTE_NORMAL) self.assertEqual(winutil.get_file_size(h), sz) self.assertRaises(OSError, winutil.set_file_pointer, h, 23, 23) self.assertEqual(winutil.read_file(h), data) self.assertEqual(winutil.read_file(h), b'') winutil.set_file_pointer(h, 3) self.assertEqual(winutil.read_file(h), data[3:]) self.assertEqual(winutil.nlinks(path), 1) npath = path + '.2' winutil.create_hard_link(npath, path) h.close() self.assertEqual(open(npath, 'rb').read(), data) self.assertEqual(winutil.nlinks(path), 2) winutil.delete_file(path) self.assertEqual(winutil.nlinks(npath), 1) winutil.set_file_attributes(npath, winutil.FILE_ATTRIBUTE_READONLY) self.assertRaises(OSError, winutil.delete_file, npath) winutil.set_file_attributes(npath, winutil.FILE_ATTRIBUTE_NORMAL) winutil.delete_file(npath) self.assertGreater(min(winutil.get_disk_free_space(None)), 0) open(path, 'wb').close() open(npath, 'wb').close() winutil.move_file(path, npath, winutil.MOVEFILE_WRITE_THROUGH | winutil.MOVEFILE_REPLACE_EXISTING) self.assertFalse(os.path.exists(path)) os.remove(npath) dpath = tempfile.mkdtemp(dir=os.path.dirname(path)) dh = winutil.create_file( dpath, winutil.FILE_LIST_DIRECTORY, winutil.FILE_SHARE_READ, winutil.OPEN_EXISTING, winutil.FILE_FLAG_BACKUP_SEMANTICS, ) from threading import Thread, Event started = Event() events 
= [] def read_changes(): buffer = b'0' * 8192 started.set() events.extend(winutil.read_directory_changes( dh, buffer, True, winutil.FILE_NOTIFY_CHANGE_FILE_NAME | winutil.FILE_NOTIFY_CHANGE_DIR_NAME | winutil.FILE_NOTIFY_CHANGE_ATTRIBUTES | winutil.FILE_NOTIFY_CHANGE_SIZE | winutil.FILE_NOTIFY_CHANGE_LAST_WRITE | winutil.FILE_NOTIFY_CHANGE_SECURITY )) t = Thread(target=read_changes, daemon=True) t.start() started.wait(1) t.join(0.1) testp = os.path.join(dpath, 'test') open(testp, 'w').close() t.join(4) self.assertTrue(events) for actions, path in events: self.assertEqual(os.path.join(dpath, path), testp) dh.close() os.remove(testp) os.rmdir(dpath) del h shutil.rmtree(tdir) m = winutil.create_mutex("test-mutex", False) self.assertRaises(OSError, winutil.create_mutex, 'test-mutex', False) m.close() self.assertEqual(winutil.parse_cmdline('"c:\\test exe.exe" "some arg" 2'), ('c:\\test exe.exe', 'some arg', '2'))
def build_index(books, num, search, sort, order, start, total, url_base, field_metadata, ctx, library_map, library_id): # {{{ logo = E.div(E.img(src=ctx.url_for('/static', what='calibre.png'), alt=__appname__), id='logo') search_box = build_search_box(num, search, sort, order, ctx, field_metadata) navigation = build_navigation(start, num, total, url_base) navigation2 = build_navigation(start, num, total, url_base) if library_map: choose_library = build_choose_library(ctx, library_map) books_table = E.table(id='listing') body = E.body( logo, search_box, navigation, E.hr(class_='spacer'), books_table, E.hr(class_='spacer'), navigation2 ) for book in books: thumbnail = E.td( E.img(type='image/jpeg', border='0', src=ctx.url_for('/get', what='thumb', book_id=book.id, library_id=library_id), class_='thumbnail') ) data = E.td() for fmt in book.formats or (): if not fmt or fmt.lower().startswith('original_'): continue s = E.span( E.a( fmt.lower(), href=ctx.url_for('/get', what=fmt, book_id=book.id, library_id=library_id) ), class_='button') s.tail = u'' data.append(s) div = E.div(class_='data-container') data.append(div) series = ('[%s - %s]'%(book.series, book.series_index)) if book.series else '' tags = ('Tags=[%s]'%', '.join(book.tags)) if book.tags else '' ctext = '' for key in filter(ctx.is_field_displayable, field_metadata.ignorable_field_keys()): fm = field_metadata[key] if fm['datatype'] == 'comments': continue name, val = book.format_field(key) if val: ctext += '%s=[%s] '%(name, val) first = E.span(u'\u202f%s %s by %s' % (book.title, series, authors_to_string(book.authors)), class_='first-line') div.append(first) ds = '' if is_date_undefined(book.timestamp) else strftime('%d %b, %Y', t=dt_as_local(book.timestamp).timetuple()) second = E.span(u'%s %s %s' % (ds, tags, ctext), class_='second-line') div.append(second) books_table.append(E.tr(thumbnail, data)) if library_map: body.append(choose_library) body.append(E.div( E.a(_('Switch to the full interface (non-mobile interface)'), href=ctx.url_for(None), style="text-decoration: none; color: blue", title=_('The full interface gives you many more features, ' 'but it may not work well on a small screen')), style="text-align:center") ) return E.html( E.head( E.title(__appname__ + ' Library'), E.link(rel='icon', href=ctx.url_for('/favicon.png'), type='image/png'), E.link(rel='stylesheet', type='text/css', href=ctx.url_for('/static', what='mobile.css')), E.link(rel='apple-touch-icon', href=ctx.url_for("/static", what='calibre.png')), E.meta(name="robots", content="noindex") ), # End head body ) # End html
def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher=(''), rescale_fonts=False): css = P('jacket/stylesheet.css', data=True).decode('utf-8') template = P('jacket/template.xhtml', data=True).decode('utf-8') try: title_str = mi.title if mi.title else alt_title except: title_str = _('Unknown') title = '<span class="title">%s</span>' % (escape(title_str)) series = Series(mi.series, mi.series_index) try: publisher = mi.publisher if mi.publisher else alt_publisher except: publisher = '' try: if is_date_undefined(mi.pubdate): pubdate = '' else: pubdate = strftime(u'%Y', mi.pubdate.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = Tags((mi.tags if mi.tags else alt_tags), output_profile) comments = mi.comments if mi.comments else alt_comments comments = comments.strip() orig_comments = comments if comments: comments = comments_to_html(comments) try: author = mi.format_authors() except: author = '' def generate_html(comments): args = dict(xmlns=XHTML_NS, title_str=title_str, css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=_('Series'), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='', searchable_tags=' '.join(escape(t)+'ttt' for t in tags.tags_list), ) for key in mi.custom_field_keys(): try: display_name, val = mi.format_field_extended(key)[:2] key = key.replace('#', '_') args[key] = escape(val) args[key+'_label'] = escape(display_name) except: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" %s: %s" % ('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') formatter = SafeFormatter() generated_html = formatter.format(template, **args) # Post-process the generated html to strip out empty header items soup = BeautifulSoup(generated_html) if not series: series_tag = soup.find(attrs={'class':'cbj_series'}) if series_tag is not None: series_tag.extract() if not rating: rating_tag = soup.find(attrs={'class':'cbj_rating'}) if rating_tag is not None: rating_tag.extract() if not tags: tags_tag = soup.find(attrs={'class':'cbj_tags'}) if tags_tag is not None: tags_tag.extract() if not pubdate: pubdate_tag = soup.find(attrs={'class':'cbj_pubdata'}) if pubdate_tag is not None: pubdate_tag.extract() if output_profile.short_name != 'kindle': hr_tag = soup.find('hr', attrs={'class':'cbj_kindle_banner_hr'}) if hr_tag is not None: hr_tag.extract() return strip_encoding_declarations( soup.renderContents('utf-8').decode('utf-8')) from calibre.ebooks.oeb.base import RECOVER_PARSER try: root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER) except: try: root = etree.fromstring(generate_html(escape(orig_comments)), parser=RECOVER_PARSER) except: root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER) if rescale_fonts: # We ensure that the conversion pipeline will set the font sizes for # text in the jacket to the same size as the font sizes for the rest of # the text in the book. 
That means that as long as the jacket uses # relative font sizes (em or %), the post conversion font size will be # the same as for text in the main book. So text with size x em will # be rescaled to the same value in both the jacket and the main content. # # We cannot use calibre_rescale_100 on the body tag as that will just # give the body tag a font size of 1em, which is useless. for body in root.xpath('//*[local-name()="body"]'): fw = body.makeelement(XHTML('div')) fw.set('class', 'calibre_rescale_100') for child in body: fw.append(child) body.append(fw) from calibre.ebooks.oeb.polish.pretty import pretty_html_tree pretty_html_tree(None, root) return root
def generate_annotation_html(self, bookmark): from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString # Returns <div class="user_annotations"> ... </div> last_read_location = bookmark.last_read_location timestamp = datetime.datetime.utcfromtimestamp(bookmark.timestamp) percent_read = bookmark.percent_read ka_soup = BeautifulSoup() dtc = 0 divTag = Tag(ka_soup, "div") divTag["class"] = "user_annotations" # Add the last-read location spanTag = Tag(ka_soup, "span") spanTag["style"] = "font-weight:bold" if bookmark.book_format == "pdf": spanTag.insert( 0, NavigableString( _("%(time)s<br />Last Page Read: %(loc)d (%(pr)d%%)") % dict(time=strftime(u"%x", timestamp.timetuple()), loc=last_read_location, pr=percent_read) ), ) else: spanTag.insert( 0, NavigableString( _("%(time)s<br />Last Page Read: Location %(loc)d (%(pr)d%%)") % dict(time=strftime(u"%x", timestamp.timetuple()), loc=last_read_location, pr=percent_read) ), ) divTag.insert(dtc, spanTag) dtc += 1 divTag.insert(dtc, Tag(ka_soup, "br")) dtc += 1 if bookmark.user_notes: user_notes = bookmark.user_notes annotations = [] # Add the annotations sorted by location # Italicize highlighted text for location in sorted(user_notes): if user_notes[location]["text"]: annotations.append( _("<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />") % dict( dl=user_notes[location]["displayed_location"], typ=user_notes[location]["type"], text=( user_notes[location]["text"] if user_notes[location]["type"] == "Note" else "<i>%s</i>" % user_notes[location]["text"] ), ) ) else: if bookmark.book_format == "pdf": annotations.append( _("<b>Page %(dl)d • %(typ)s</b><br />") % dict(dl=user_notes[location]["displayed_location"], typ=user_notes[location]["type"]) ) else: annotations.append( _("<b>Location %(dl)d • %(typ)s</b><br />") % dict(dl=user_notes[location]["displayed_location"], typ=user_notes[location]["type"]) ) for annotation in annotations: divTag.insert(dtc, annotation) dtc += 1 ka_soup.insert(0, divTag) return ka_soup
def fd_format_ampm(dt, ampm, ap):
    res = strftime('%p', t=dt.timetuple())
    if ap == 'AP':
        return res
    return res.lower()
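# Illustrative usage sketch (not part of the source): fd_format_ampm() maps a
# datetime to the locale's AM/PM marker, uppercased when the format token is
# 'AP' and lowercased otherwise. Assumes a calibre environment where its
# strftime wrapper is importable; the datetime value below is hypothetical.
from datetime import datetime

_dt = datetime(2024, 1, 1, 9, 30)
print(fd_format_ampm(_dt, 'a', 'AP'))  # e.g. 'AM'
print(fd_format_ampm(_dt, 'a', 'ap'))  # e.g. 'am'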
def run(self, path_to_output, opts, db, notification=DummyReporter()): from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder from calibre.utils.logging import default_log as log opts.log = log opts.fmt = self.fmt = path_to_output.rpartition('.')[2] # Add local options opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y')) opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d')) opts.connected_kindle = False # Finalize output_profile op = opts.output_profile if op is None: op = 'default' if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower(): opts.connected_kindle = True if opts.connected_device['serial'] and \ opts.connected_device['serial'][:4] in ['B004', 'B005']: op = "kindle_dx" else: op = "kindle" opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100 opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60 opts.output_profile = op opts.basename = "Catalog" opts.cli_environment = not hasattr(opts, 'sync') # Hard-wired to always sort descriptions by author, with series after non-series opts.sort_descriptions_by_author = True build_log = [] build_log.append(u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" % (self.name, current_library_name(), self.fmt, 'for %s ' % opts.output_profile if opts.output_profile else '', 'CLI' if opts.cli_environment else 'GUI', calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False)) ) # If exclude_genre is blank, assume user wants all tags as genres if opts.exclude_genre.strip() == '': #opts.exclude_genre = '\[^.\]' #build_log.append(" converting empty exclude_genre to '\[^.\]'") opts.exclude_genre = 'a^' build_log.append(" converting empty exclude_genre to 'a^'") if opts.connected_device['is_device_connected'] and \ opts.connected_device['kind'] == 'device': if opts.connected_device['serial']: build_log.append(u" connected_device: '%s' #%s%s " % \ (opts.connected_device['name'], opts.connected_device['serial'][0:4], 'x' * (len(opts.connected_device['serial']) - 4))) for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) try: for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) except: build_log.append(u" (no mount points)") else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) opts_dict = vars(opts) if opts_dict['ids']: build_log.append(" book count: %d" % len(opts_dict['ids'])) sections_list = [] if opts.generate_authors: sections_list.append('Authors') if opts.generate_titles: sections_list.append('Titles') if opts.generate_series: sections_list.append('Series') if opts.generate_genres: sections_list.append('Genres') if opts.generate_recently_added: sections_list.append('Recently Added') if opts.generate_descriptions: sections_list.append('Descriptions') if not sections_list: if opts.cli_environment: opts.log.warn('*** No Section switches specified, enabling all Sections ***') opts.generate_authors = True opts.generate_titles = True opts.generate_series = True opts.generate_genres = True opts.generate_recently_added = True opts.generate_descriptions = True sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions'] else: opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***') return ["No Included 
Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"] if opts.fmt == 'mobi' and sections_list == ['Descriptions']: warning = _("\n*** Adding 'By Authors' Section required for MOBI output ***") opts.log.warn(warning) sections_list.insert(0, 'Authors') opts.generate_authors = True opts.log(u" Sections: %s" % ', '.join(sections_list)) opts.section_list = sections_list # Limit thumb_width to 1.0" - 2.0" try: if float(opts.thumb_width) < float(self.THUMB_SMALLEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = self.THUMB_SMALLEST if float(opts.thumb_width) > float(self.THUMB_LARGEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST)) opts.thumb_width = self.THUMB_LARGEST opts.thumb_width = "%.2f" % float(opts.thumb_width) except: log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = "1.0" # eval prefix_rules if passed from command line if type(opts.prefix_rules) is not tuple: try: opts.prefix_rules = eval(opts.prefix_rules) except: log.error("malformed --prefix-rules: %s" % opts.prefix_rules) raise for rule in opts.prefix_rules: if len(rule) != 4: log.error("incorrect number of args for --prefix-rules: %s" % repr(rule)) # eval exclusion_rules if passed from command line if type(opts.exclusion_rules) is not tuple: try: opts.exclusion_rules = eval(opts.exclusion_rules) except: log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules) raise for rule in opts.exclusion_rules: if len(rule) != 3: log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule)) # Display opts keys = opts_dict.keys() keys.sort() build_log.append(" opts:") for key in keys: if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator', 'cross_reference_authors', 'description_clip', 'exclude_book_marker', 'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt', 'genre_source_field', 'header_note_source_field', 'merge_comments_rule', 'output_profile', 'prefix_rules', 'read_book_marker', 'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync', 'thumb_width', 'use_existing_cover', 'wishlist_tag']: build_log.append(" %s: %s" % (key, repr(opts_dict[key]))) if opts.verbose: log('\n'.join(line for line in build_log)) self.opts = opts # Launch the Catalog builder catalog = CatalogBuilder(db, opts, self, report_progress=notification) if opts.verbose: log.info(" Begin catalog source generation") try: catalog.build_sources() if opts.verbose: log.info(" Completed catalog source generation\n") except (AuthorSortMismatchException, EmptyCatalogException), e: log.error(" *** Terminated catalog generation: %s ***" % e)
def generate_annotation_html(self, bookmark): from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString # Returns <div class="user_annotations"> ... </div> last_read_location = bookmark.last_read_location timestamp = datetime.datetime.utcfromtimestamp(bookmark.timestamp) percent_read = bookmark.percent_read ka_soup = BeautifulSoup() dtc = 0 divTag = Tag(ka_soup, 'div') divTag['class'] = 'user_annotations' # Add the last-read location spanTag = Tag(ka_soup, 'span') spanTag['style'] = 'font-weight:bold' if bookmark.book_format == 'pdf': spanTag.insert(0,NavigableString( _("%(time)s<br />Last Page Read: %(loc)d (%(pr)d%%)") % \ dict(time=strftime(u'%x', timestamp.timetuple()), loc=last_read_location, pr=percent_read))) else: spanTag.insert(0,NavigableString( _("%(time)s<br />Last Page Read: Location %(loc)d (%(pr)d%%)") % \ dict(time=strftime(u'%x', timestamp.timetuple()), loc=last_read_location, pr=percent_read))) divTag.insert(dtc, spanTag) dtc += 1 divTag.insert(dtc, Tag(ka_soup, 'br')) dtc += 1 if bookmark.user_notes: user_notes = bookmark.user_notes annotations = [] # Add the annotations sorted by location # Italicize highlighted text for location in sorted(user_notes): if user_notes[location]['text']: annotations.append( _('<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />') % \ dict(dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'], text=(user_notes[location]['text'] if \ user_notes[location]['type'] == 'Note' else \ '<i>%s</i>' % user_notes[location]['text']))) else: if bookmark.book_format == 'pdf': annotations.append( _('<b>Page %(dl)d • %(typ)s</b><br />') % \ dict(dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'])) else: annotations.append( _('<b>Location %(dl)d • %(typ)s</b><br />') % \ dict(dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'])) for annotation in annotations: divTag.insert(dtc, annotation) dtc += 1 ka_soup.insert(0, divTag) return ka_soup
def sony_metadata(oeb): m = oeb.metadata title = short_title = str(m.title[0]) publisher = __appname__ + ' ' + __version__ try: pt = str(oeb.metadata.publication_type[0]) short_title = ':'.join(pt.split(':')[2:]) except: pass try: date = parse_date(str(m.date[0]), as_utc=False).strftime('%Y-%m-%d') except: date = strftime('%Y-%m-%d') try: language = str(m.language[0]).replace('_', '-') except: language = 'en' short_title = xml(short_title, True) metadata = SONY_METADATA.format(title=xml(title), short_title=short_title, publisher=xml(publisher), issue_date=xml(date), language=xml(language)) updated = strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) def cal_id(x): for k, v in x.attrib.items(): if k.endswith('scheme') and v == 'uuid': return True try: base_id = str(list(filter(cal_id, m.identifier))[0]) except: base_id = str(uuid4()) toc = oeb.toc if False and toc.depth() < 3: # Single section periodical # Disabled since I prefer the current behavior from calibre.ebooks.oeb.base import TOC section = TOC(klass='section', title=_('All articles'), href=oeb.spine[2].href) for x in toc: section.nodes.append(x) toc = TOC(klass='periodical', href=oeb.spine[2].href, title=str(oeb.metadata.title[0])) toc.nodes.append(section) entries = [] seen_titles = set() for i, section in enumerate(toc): if not section.href: continue secid = 'section%d'%i sectitle = section.title if not sectitle: sectitle = _('Unknown') d = 1 bsectitle = sectitle while sectitle in seen_titles: sectitle = bsectitle + ' ' + str(d) d += 1 seen_titles.add(sectitle) sectitle = xml(sectitle, True) secdesc = section.description if not secdesc: secdesc = '' secdesc = xml(secdesc) entries.append(SONY_ATOM_SECTION.format(title=sectitle, href=section.href, id=xml(base_id)+'/'+secid, short_title=short_title, desc=secdesc, updated=updated)) for j, article in enumerate(section): if not article.href: continue atitle = article.title btitle = atitle d = 1 while atitle in seen_titles: atitle = btitle + ' ' + str(d) d += 1 auth = article.author if article.author else '' desc = section.description if not desc: desc = '' aid = 'article%d'%j entries.append(SONY_ATOM_ENTRY.format( title=xml(atitle), author=xml(auth), updated=updated, desc=desc, short_title=short_title, section_title=sectitle, href=article.href, word_count=str(1), id=xml(base_id)+'/'+secid+'/'+aid )) atom = SONY_ATOM.format(short_title=short_title, entries='\n\n'.join(entries), updated=updated, id=xml(base_id)).encode('utf-8') return metadata, atom
def run(self, path_to_output, opts, db, notification=DummyReporter()): from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder from calibre.utils.logging import default_log as log # If preset specified from the cli, insert stored options from JSON file if hasattr(opts, 'preset') and opts.preset: available_presets = JSONConfig("catalog_presets") if not opts.preset in available_presets: if available_presets: print(_('Error: Preset "%s" not found.' % opts.preset)) print( _('Stored presets: %s' % ', '.join( [p for p in sorted(available_presets.keys())]))) else: print(_('Error: No stored presets.')) return 1 # Copy the relevant preset values to the opts object for item in available_presets[opts.preset]: if not item in [ 'exclusion_rules_tw', 'format', 'prefix_rules_tw' ]: setattr(opts, item, available_presets[opts.preset][item]) # Provide an unconnected device opts.connected_device = { 'is_device_connected': False, 'kind': None, 'name': None, 'save_template': None, 'serial': None, 'storage': None, } # Convert prefix_rules and exclusion_rules from JSON lists to tuples prs = [] for rule in opts.prefix_rules: prs.append(tuple(rule)) opts.prefix_rules = tuple(prs) ers = [] for rule in opts.exclusion_rules: ers.append(tuple(rule)) opts.exclusion_rules = tuple(ers) opts.log = log opts.fmt = self.fmt = path_to_output.rpartition('.')[2] # Add local options opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y')) opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d')) opts.connected_kindle = False # Finalize output_profile op = opts.output_profile if op is None: op = 'default' if opts.connected_device['name'] and 'kindle' in opts.connected_device[ 'name'].lower(): opts.connected_kindle = True if opts.connected_device['serial'] and \ opts.connected_device['serial'][:4] in ['B004', 'B005']: op = "kindle_dx" else: op = "kindle" opts.description_clip = 380 if op.endswith( 'dx') or 'kindle' not in op else 100 opts.author_clip = 100 if op.endswith( 'dx') or 'kindle' not in op else 60 opts.output_profile = op opts.basename = "Catalog" opts.cli_environment = not hasattr(opts, 'sync') # Hard-wired to always sort descriptions by author, with series after non-series opts.sort_descriptions_by_author = True build_log = [] build_log.append( u"%s('%s'): Generating %s %sin %s environment, locale: '%s'" % (self.name, current_library_name(), self.fmt, 'for %s ' % opts.output_profile if opts.output_profile else '', 'CLI' if opts.cli_environment else 'GUI', calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))) # If exclude_genre is blank, assume user wants all tags as genres if opts.exclude_genre.strip() == '': #opts.exclude_genre = '\[^.\]' #build_log.append(" converting empty exclude_genre to '\[^.\]'") opts.exclude_genre = 'a^' build_log.append(" converting empty exclude_genre to 'a^'") if opts.connected_device['is_device_connected'] and \ opts.connected_device['kind'] == 'device': if opts.connected_device['serial']: build_log.append(u" connected_device: '%s' #%s%s " % \ (opts.connected_device['name'], opts.connected_device['serial'][0:4], 'x' * (len(opts.connected_device['serial']) - 4))) for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) try: for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) except: build_log.append(u" (no 
mount points)") else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) opts_dict = vars(opts) if opts_dict['ids']: build_log.append(" book count: %d" % len(opts_dict['ids'])) sections_list = [] if opts.generate_authors: sections_list.append('Authors') if opts.generate_titles: sections_list.append('Titles') if opts.generate_series: sections_list.append('Series') if opts.generate_genres: sections_list.append('Genres') if opts.generate_recently_added: sections_list.append('Recently Added') if opts.generate_descriptions: sections_list.append('Descriptions') if not sections_list: if opts.cli_environment: opts.log.warn( '*** No Section switches specified, enabling all Sections ***' ) opts.generate_authors = True opts.generate_titles = True opts.generate_series = True opts.generate_genres = True opts.generate_recently_added = True opts.generate_descriptions = True sections_list = [ 'Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions' ] else: opts.log.warn( '\n*** No enabled Sections, terminating catalog generation ***' ) return [ "No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n" ] if opts.fmt == 'mobi' and sections_list == ['Descriptions']: warning = _( "\n*** Adding 'By Authors' Section required for MOBI output ***" ) opts.log.warn(warning) sections_list.insert(0, 'Authors') opts.generate_authors = True opts.log(u" Sections: %s" % ', '.join(sections_list)) opts.section_list = sections_list # Limit thumb_width to 1.0" - 2.0" try: if float(opts.thumb_width) < float(self.THUMB_SMALLEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = self.THUMB_SMALLEST if float(opts.thumb_width) > float(self.THUMB_LARGEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_LARGEST)) opts.thumb_width = self.THUMB_LARGEST opts.thumb_width = "%.2f" % float(opts.thumb_width) except: log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width, self.THUMB_SMALLEST)) opts.thumb_width = "1.0" # eval prefix_rules if passed from command line if type(opts.prefix_rules) is not tuple: try: opts.prefix_rules = eval(opts.prefix_rules) except: log.error("malformed --prefix-rules: %s" % opts.prefix_rules) raise for rule in opts.prefix_rules: if len(rule) != 4: log.error( "incorrect number of args for --prefix-rules: %s" % repr(rule)) # eval exclusion_rules if passed from command line if type(opts.exclusion_rules) is not tuple: try: opts.exclusion_rules = eval(opts.exclusion_rules) except: log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules) raise for rule in opts.exclusion_rules: if len(rule) != 3: log.error( "incorrect number of args for --exclusion-rules: %s" % repr(rule)) # Display opts keys = sorted(opts_dict.keys()) build_log.append(" opts:") for key in keys: if key in [ 'catalog_title', 'author_clip', 'connected_kindle', 'creator', 'cross_reference_authors', 'description_clip', 'exclude_book_marker', 'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt', 'genre_source_field', 'header_note_source_field', 'merge_comments_rule', 'output_profile', 'prefix_rules', 'preset', 'read_book_marker', 'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync', 'thumb_width', 'use_existing_cover', 'wishlist_tag' ]: build_log.append(" %s: %s" % (key, repr(opts_dict[key]))) if opts.verbose: log('\n'.join(line for line in build_log)) # Capture start_time opts.start_time = time.time() self.opts = opts if 
opts.verbose: log.info(" Begin catalog source generation (%s)" % str( datetime.timedelta(seconds=int(time.time() - opts.start_time)))) # Launch the Catalog builder catalog = CatalogBuilder(db, opts, self, report_progress=notification) try: catalog.build_sources() if opts.verbose: log.info(" Completed catalog source generation (%s)\n" % str( datetime.timedelta(seconds=int(time.time() - opts.start_time)))) except (AuthorSortMismatchException, EmptyCatalogException) as e: log.error(" *** Terminated catalog generation: %s ***" % e)
def build_index(rd, books, num, search, sort, order, start, total, url_base, field_metadata, ctx, library_map, library_id): # {{{ logo = E.div(E.img(src=ctx.url_for('/static', what='calibre.png'), alt=__appname__), id='logo') search_box = build_search_box(num, search, sort, order, ctx, field_metadata, library_id) navigation = build_navigation(start, num, total, url_base) navigation2 = build_navigation(start, num, total, url_base) if library_map: choose_library = build_choose_library(ctx, library_map) books_table = E.table(id='listing') body = E.body(logo, search_box, navigation, E.hr(class_='spacer'), books_table, E.hr(class_='spacer'), navigation2) for book in books: thumbnail = E.td( E.img(type='image/jpeg', border='0', src=ctx.url_for('/get', what='thumb', book_id=book.id, library_id=library_id), class_='thumbnail')) data = E.td() for fmt in book.formats or (): if not fmt or fmt.lower().startswith('original_'): continue s = E.span(E.a(fmt.lower(), href=ctx.url_for('/legacy/get', what=fmt, book_id=book.id, library_id=library_id, filename=book_filename( rd, book.id, book, fmt))), class_='button') s.tail = u'' data.append(s) div = E.div(class_='data-container') data.append(div) series = ('[%s - %s]' % (book.series, book.series_index)) if book.series else '' tags = ('Tags=[%s]' % ', '.join(book.tags)) if book.tags else '' ctext = '' for key in filter(ctx.is_field_displayable, field_metadata.ignorable_field_keys()): fm = field_metadata[key] if fm['datatype'] == 'comments': continue name, val = book.format_field(key) if val: ctext += '%s=[%s] ' % (name, val) first = E.span('%s %s by %s' % (book.title, series, authors_to_string(book.authors)), class_='first-line') div.append(first) ds = '' if is_date_undefined(book.timestamp) else strftime( '%d %b, %Y', t=dt_as_local(book.timestamp).timetuple()) second = E.span('%s %s %s' % (ds, tags, ctext), class_='second-line') div.append(second) books_table.append(E.tr(thumbnail, data)) if library_map: body.append(choose_library) body.append( E.div(E.a(_('Switch to the full interface (non-mobile interface)'), href=ctx.url_for(None), style="text-decoration: none; color: blue", title=_('The full interface gives you many more features, ' 'but it may not work well on a small screen')), style="text-align:center")) return E.html( E.head( E.title(__appname__ + ' Library'), E.link(rel='icon', href=ctx.url_for('/favicon.png'), type='image/png'), E.link(rel='stylesheet', type='text/css', href=ctx.url_for('/static', what='mobile.css')), E.link(rel='apple-touch-icon', href=ctx.url_for("/static", what='calibre.png')), E.meta(name="robots", content="noindex")), # End head body) # End html
def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher='', rescale_fonts=False, alt_authors=None): css = P('jacket/stylesheet.css', data=True).decode('utf-8') template = P('jacket/template.xhtml', data=True).decode('utf-8') template = re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL) css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL) try: title_str = alt_title if mi.is_null('title') else mi.title except: title_str = _('Unknown') title_str = escape(title_str) title = '<span class="title">%s</span>' % title_str series = Series(mi.series, mi.series_index) try: publisher = mi.publisher if not mi.is_null( 'publisher') else alt_publisher except: publisher = '' publisher = escape(publisher) try: if is_date_undefined(mi.pubdate): pubdate = '' else: dt = as_local_time(mi.pubdate) pubdate = strftime('%Y', dt.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = Tags((mi.tags if mi.tags else alt_tags), output_profile) comments = mi.comments if mi.comments else alt_comments comments = comments.strip() if comments: comments = comments_to_html(comments) orig = mi.authors if mi.is_null('authors'): mi.authors = list(alt_authors or (_('Unknown'), )) try: author = mi.format_authors() except: author = '' mi.authors = orig author = escape(author) has_data = {} def generate_html(comments): display = Attributes() args = dict( xmlns=XHTML_NS, title_str=title_str, identifiers=Identifiers(mi.identifiers), css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=ngettext('Series', 'Series', 1), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='', display=display, searchable_tags=' '.join( escape(t) + 'ttt' for t in tags.tags_list), ) for key in mi.custom_field_keys(): m = mi.get_user_metadata(key, False) or {} try: display_name, val = mi.format_field_extended(key)[:2] dkey = key.replace('#', '_') dt = m.get('datatype') if dt == 'series': args[dkey] = Series(mi.get(key), mi.get(key + '_index')) elif dt == 'rating': args[dkey] = rating_to_stars( mi.get(key), m.get('display', {}).get('allow_half_stars', False)) elif dt == 'comments': val = val or '' ctype = m.get('display', {}).get('interpret_as') or 'html' if ctype == 'long-text': val = '<pre style="white-space:pre-wrap">%s</pre>' % escape( val) elif ctype == 'short-text': val = '<span>%s</span>' % escape(val) elif ctype == 'markdown': val = markdown(val) else: val = comments_to_html(val) args[dkey] = val else: args[dkey] = escape(val) args[dkey + '_label'] = escape(display_name) setattr(display, dkey, 'none' if mi.is_null(key) else 'initial') except Exception: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" {}: {}".format('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') has_data['series'] = bool(series) has_data['tags'] = bool(tags) has_data['rating'] = bool(rating) has_data['pubdate'] = bool(pubdate) for k, v in has_data.items(): setattr(display, k, 'initial' if v else 'none') display.title = 'initial' if 
mi.identifiers: display.identifiers = 'initial' formatter = SafeFormatter() generated_html = formatter.format(template, **args) return strip_encoding_declarations(generated_html) from calibre.ebooks.oeb.polish.parsing import parse raw = generate_html(comments) root = parse(raw, line_numbers=False, force_html5_parse=True) if rescale_fonts: # We ensure that the conversion pipeline will set the font sizes for # text in the jacket to the same size as the font sizes for the rest of # the text in the book. That means that as long as the jacket uses # relative font sizes (em or %), the post conversion font size will be # the same as for text in the main book. So text with size x em will # be rescaled to the same value in both the jacket and the main content. # # We cannot use data-calibre-rescale 100 on the body tag as that will just # give the body tag a font size of 1em, which is useless. for body in root.xpath('//*[local-name()="body"]'): fw = body.makeelement(XHTML('div')) fw.set('data-calibre-rescale', '100') for child in body: fw.append(child) body.append(fw) postprocess_jacket(root, output_profile, has_data) from calibre.ebooks.oeb.polish.pretty import pretty_html_tree pretty_html_tree(None, root) return root
def create_bibtex_entry(entry, fields, mode, template_citation, bibtexdict, db, citation_bibtex=True, calibre_files=True): #Bibtex doesn't like UTF-8 but keep unicode until writing #Define starting chain or if book valid strict and not book return a Fail string bibtex_entry = [] if mode != "misc" and check_entry_book_valid(entry): bibtex_entry.append(u'@book{') elif mode != "book": bibtex_entry.append(u'@misc{') else: #case strict book return '' if citation_bibtex: # Citation tag bibtex_entry.append( make_bibtex_citation(entry, template_citation, bibtexdict)) bibtex_entry = [u' '.join(bibtex_entry)] for field in fields: if field.startswith('#'): item = db.get_field(entry['id'], field, index_is_id=True) if isinstance(item, (bool, float, int)): item = repr(item) elif field == 'title_sort': item = entry['sort'] elif field == 'library_name': item = library_name else: item = entry[field] #check if the field should be included (none or empty) if item is None: continue try: if len(item) == 0: continue except TypeError: pass if field == 'authors': bibtex_entry.append(u'author = "%s"' % bibtexdict.bibtex_author_format(item)) elif field == 'id': bibtex_entry.append(u'calibreid = "%s"' % int(item)) elif field == 'rating': bibtex_entry.append(u'rating = "%s"' % int(item)) elif field == 'size': bibtex_entry.append(u'%s = "%s octets"' % (field, int(item))) elif field == 'tags': #A list to flatten bibtex_entry.append( u'tags = "%s"' % bibtexdict.utf8ToBibtex(u', '.join(item))) elif field == 'comments': #\n removal item = item.replace(u'\r\n', u' ') item = item.replace(u'\n', u' ') # unmatched brace removal (users should use \leftbrace or \rightbrace for single braces) item = bibtexdict.stripUnmatchedSyntax(item, u'{', u'}') #html to text try: item = html2text(item) except: log.warn("Failed to convert comments to text") bibtex_entry.append(u'note = "%s"' % bibtexdict.utf8ToBibtex(item)) elif field == 'isbn': # Could be 9, 10 or 13 digits bibtex_entry.append(u'isbn = "%s"' % format_isbn(item)) elif field == 'formats': #Add file path if format is selected formats = [ format.rpartition('.')[2].lower() for format in item ] bibtex_entry.append(u'formats = "%s"' % u', '.join(formats)) if calibre_files: files = [u':%s:%s' % (format, format.rpartition('.')[2].upper())\ for format in item] bibtex_entry.append(u'file = "%s"' % u', '.join(files)) elif field == 'series_index': bibtex_entry.append(u'volume = "%s"' % int(item)) elif field == 'timestamp': bibtex_entry.append(u'timestamp = "%s"' % isoformat(item).partition('T')[0]) elif field == 'pubdate': bibtex_entry.append(u'year = "%s"' % item.year) bibtex_entry.append( u'month = "%s"' % bibtexdict.utf8ToBibtex(strftime("%b", item))) elif field.startswith('#') and isinstance(item, basestring): bibtex_entry.append( u'custom_%s = "%s"' % (field[1:], bibtexdict.utf8ToBibtex(item))) elif isinstance(item, basestring): # elif field in ['title', 'publisher', 'cover', 'uuid', 'ondevice', # 'author_sort', 'series', 'title_sort'] : bibtex_entry.append(u'%s = "%s"' % (field, bibtexdict.utf8ToBibtex(item))) bibtex_entry = u',\n '.join(bibtex_entry) bibtex_entry += u' }\n\n' return bibtex_entry
def sony_metadata(oeb): m = oeb.metadata title = short_title = unicode(m.title[0]) publisher = __appname__ + ' ' + __version__ try: pt = unicode(oeb.metadata.publication_type[0]) short_title = u':'.join(pt.split(':')[2:]) except: pass try: date = parse_date(unicode(m.date[0]), as_utc=False).strftime('%Y-%m-%d') except: date = strftime('%Y-%m-%d') try: language = unicode(m.language[0]).replace('_', '-') except: language = 'en' short_title = xml(short_title, True) metadata = SONY_METADATA.format(title=xml(title), short_title=short_title, publisher=xml(publisher), issue_date=xml(date), language=xml(language)) updated = strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) def cal_id(x): for k, v in x.attrib.items(): if k.endswith('scheme') and v == 'uuid': return True try: base_id = unicode(list(filter(cal_id, m.identifier))[0]) except: base_id = str(uuid4()) toc = oeb.toc if False and toc.depth() < 3: # Single section periodical # Disabled since I prefer the current behavior from calibre.ebooks.oeb.base import TOC section = TOC(klass='section', title=_('All articles'), href=oeb.spine[2].href) for x in toc: section.nodes.append(x) toc = TOC(klass='periodical', href=oeb.spine[2].href, title=unicode(oeb.metadata.title[0])) toc.nodes.append(section) entries = [] seen_titles = set([]) for i, section in enumerate(toc): if not section.href: continue secid = 'section%d'%i sectitle = section.title if not sectitle: sectitle = _('Unknown') d = 1 bsectitle = sectitle while sectitle in seen_titles: sectitle = bsectitle + ' ' + str(d) d += 1 seen_titles.add(sectitle) sectitle = xml(sectitle, True) secdesc = section.description if not secdesc: secdesc = '' secdesc = xml(secdesc) entries.append(SONY_ATOM_SECTION.format(title=sectitle, href=section.href, id=xml(base_id)+'/'+secid, short_title=short_title, desc=secdesc, updated=updated)) for j, article in enumerate(section): if not article.href: continue atitle = article.title btitle = atitle d = 1 while atitle in seen_titles: atitle = btitle + ' ' + str(d) d += 1 auth = article.author if article.author else '' desc = section.description if not desc: desc = '' aid = 'article%d'%j entries.append(SONY_ATOM_ENTRY.format( title=xml(atitle), author=xml(auth), updated=updated, desc=desc, short_title=short_title, section_title=sectitle, href=article.href, word_count=str(1), id=xml(base_id)+'/'+secid+'/'+aid )) atom = SONY_ATOM.format(short_title=short_title, entries='\n\n'.join(entries), updated=updated, id=xml(base_id)).encode('utf-8') return metadata, atom
def run(self, path_to_output, opts, db, notification=DummyReporter()): from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder from calibre.utils.logging import default_log as log opts.log = log opts.fmt = self.fmt = path_to_output.rpartition('.')[2] # Add local options opts.creator = '%s, %s %s, %s' % (strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y')) opts.creator_sort_as = '%s %s' % ('calibre', strftime('%Y-%m-%d')) opts.connected_kindle = False # Finalize output_profile op = opts.output_profile if op is None: op = 'default' if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower(): opts.connected_kindle = True if opts.connected_device['serial'] and \ opts.connected_device['serial'][:4] in ['B004','B005']: op = "kindle_dx" else: op = "kindle" opts.descriptionClip = 380 if op.endswith('dx') or 'kindle' not in op else 100 opts.authorClip = 100 if op.endswith('dx') or 'kindle' not in op else 60 opts.output_profile = op opts.basename = "Catalog" opts.cli_environment = not hasattr(opts,'sync') # Hard-wired to always sort descriptions by author, with series after non-series opts.sort_descriptions_by_author = True build_log = [] build_log.append(u"%s(): Generating %s %sin %s environment" % (self.name,self.fmt,'for %s ' % opts.output_profile if opts.output_profile else '', 'CLI' if opts.cli_environment else 'GUI')) # If exclude_genre is blank, assume user wants all genre tags included if opts.exclude_genre.strip() == '': opts.exclude_genre = '\[^.\]' build_log.append(" converting empty exclude_genre to '\[^.\]'") if opts.connected_device['is_device_connected'] and \ opts.connected_device['kind'] == 'device': if opts.connected_device['serial']: build_log.append(u" connected_device: '%s' #%s%s " % \ (opts.connected_device['name'], opts.connected_device['serial'][0:4], 'x' * (len(opts.connected_device['serial']) - 4))) for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) try: for storage in opts.connected_device['storage']: if storage: build_log.append(u" mount point: %s" % storage) except: build_log.append(u" (no mount points)") else: build_log.append(u" connected_device: '%s'" % opts.connected_device['name']) opts_dict = vars(opts) if opts_dict['ids']: build_log.append(" book count: %d" % len(opts_dict['ids'])) sections_list = [] if opts.generate_authors: sections_list.append('Authors') if opts.generate_titles: sections_list.append('Titles') if opts.generate_series: sections_list.append('Series') if opts.generate_genres: sections_list.append('Genres') if opts.generate_recently_added: sections_list.append('Recently Added') if opts.generate_descriptions: sections_list.append('Descriptions') if not sections_list: if opts.cli_environment: opts.log.warn('*** No Section switches specified, enabling all Sections ***') opts.generate_authors = True opts.generate_titles = True opts.generate_series = True opts.generate_genres = True opts.generate_recently_added = True opts.generate_descriptions = True sections_list = ['Authors','Titles','Series','Genres','Recently Added','Descriptions'] else: opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***') return ["No Included Sections","No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"] if opts.fmt == 'mobi' and sections_list == ['Descriptions']: warning = _("\n*** Adding 'By Authors' Section required for MOBI output 
***") opts.log.warn(warning) sections_list.insert(0,'Authors') opts.generate_authors = True opts.log(u" Sections: %s" % ', '.join(sections_list)) opts.section_list = sections_list # Limit thumb_width to 1.0" - 2.0" try: if float(opts.thumb_width) < float(self.THUMB_SMALLEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width,self.THUMB_SMALLEST)) opts.thumb_width = self.THUMB_SMALLEST if float(opts.thumb_width) > float(self.THUMB_LARGEST): log.warning("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width,self.THUMB_LARGEST)) opts.thumb_width = self.THUMB_LARGEST opts.thumb_width = "%.2f" % float(opts.thumb_width) except: log.error("coercing thumb_width from '%s' to '%s'" % (opts.thumb_width,self.THUMB_SMALLEST)) opts.thumb_width = "1.0" # Display opts keys = opts_dict.keys() keys.sort() build_log.append(" opts:") for key in keys: if key in ['catalog_title','authorClip','connected_kindle','descriptionClip', 'exclude_book_marker','exclude_genre','exclude_tags', 'header_note_source_field','merge_comments', 'output_profile','read_book_marker', 'search_text','sort_by','sort_descriptions_by_author','sync', 'thumb_width','wishlist_tag']: build_log.append(" %s: %s" % (key, repr(opts_dict[key]))) if opts.verbose: log('\n'.join(line for line in build_log)) self.opts = opts # Launch the Catalog builder catalog = CatalogBuilder(db, opts, self, report_progress=notification) if opts.verbose: log.info(" Begin catalog source generation") catalog.createDirectoryStructure() catalog.copyResources() catalog.calculateThumbnailSize() catalog_source_built = catalog.buildSources() if opts.verbose: if catalog_source_built: log.info(" Completed catalog source generation\n") else: log.error(" *** Terminated catalog generation, check log for details ***") if catalog_source_built: recommendations = [] recommendations.append(('remove_fake_margins', False, OptionRecommendation.HIGH)) recommendations.append(('comments', '', OptionRecommendation.HIGH)) # Use to debug generated catalog code before conversion #setattr(opts,'debug_pipeline',os.path.expanduser("~/Desktop/Catalog debug")) dp = getattr(opts, 'debug_pipeline', None) if dp is not None: recommendations.append(('debug_pipeline', dp, OptionRecommendation.HIGH)) if opts.fmt == 'mobi' and opts.output_profile and opts.output_profile.startswith("kindle"): recommendations.append(('output_profile', opts.output_profile, OptionRecommendation.HIGH)) recommendations.append(('no_inline_toc', True, OptionRecommendation.HIGH)) recommendations.append(('book_producer',opts.output_profile, OptionRecommendation.HIGH)) # If cover exists, use it cpath = None try: search_text = 'title:"%s" author:%s' % ( opts.catalog_title.replace('"', '\\"'), 'calibre') matches = db.search(search_text, return_matches=True) if matches: cpath = db.cover(matches[0], index_is_id=True, as_path=True) if cpath and os.path.exists(cpath): recommendations.append(('cover', cpath, OptionRecommendation.HIGH)) except: pass # Run ebook-convert from calibre.ebooks.conversion.plumber import Plumber plumber = Plumber(os.path.join(catalog.catalogPath, opts.basename + '.opf'), path_to_output, log, report_progress=notification, abort_after_input_dump=False) plumber.merge_ui_recommendations(recommendations) plumber.run() try: os.remove(cpath) except: pass # returns to gui2.actions.catalog:catalog_generated() return catalog.error
def generate_annotation_html(self, bookmark):
    from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
    # Returns <div class="user_annotations"> ... </div>
    last_read_location = bookmark.last_read_location
    timestamp = datetime.datetime.utcfromtimestamp(bookmark.timestamp)
    percent_read = bookmark.percent_read
    ka_soup = BeautifulSoup()
    dtc = 0
    divTag = Tag(ka_soup, 'div')
    divTag['class'] = 'user_annotations'

    # Add the last-read location
    spanTag = Tag(ka_soup, 'span')
    spanTag['style'] = 'font-weight:bold'
    if bookmark.book_format == 'pdf':
        spanTag.insert(0, NavigableString(
            _("%(time)s<br />Last Page Read: %(loc)d (%(pr)d%%)") % dict(
                time=strftime(u'%x', timestamp.timetuple()),
                loc=last_read_location,
                pr=percent_read)))
    else:
        spanTag.insert(0, NavigableString(
            _("%(time)s<br />Last Page Read: Location %(loc)d (%(pr)d%%)") % dict(
                time=strftime(u'%x', timestamp.timetuple()),
                loc=last_read_location,
                pr=percent_read)))
    divTag.insert(dtc, spanTag)
    dtc += 1
    divTag.insert(dtc, Tag(ka_soup, 'br'))
    dtc += 1

    if bookmark.user_notes:
        user_notes = bookmark.user_notes
        annotations = []

        # Add the annotations sorted by location
        # Italicize highlighted text
        for location in sorted(user_notes):
            if user_notes[location]['text']:
                annotations.append(
                    _('<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />') % dict(
                        dl=user_notes[location]['displayed_location'],
                        typ=user_notes[location]['type'],
                        text=(user_notes[location]['text'] if
                              user_notes[location]['type'] == 'Note' else
                              '<i>%s</i>' % user_notes[location]['text'])))
            else:
                if bookmark.book_format == 'pdf':
                    annotations.append(
                        _('<b>Page %(dl)d • %(typ)s</b><br />') % dict(
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type']))
                else:
                    annotations.append(
                        _('<b>Location %(dl)d • %(typ)s</b><br />') % dict(
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type']))

        for annotation in annotations:
            divTag.insert(dtc, annotation)
            dtc += 1

    ka_soup.insert(0, divTag)
    return ka_soup
def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher=('')): css = P('jacket/stylesheet.css', data=True).decode('utf-8') try: title_str = mi.title if mi.title else alt_title except: title_str = _('Unknown') title = '<span class="title">%s</span>' % (escape(title_str)) series = escape(mi.series if mi.series else '') if mi.series and mi.series_index is not None: series += escape(' [%s]' % mi.format_series_index()) if not mi.series: series = '' try: publisher = mi.publisher if mi.publisher else alt_publisher except: publisher = '' try: if is_date_undefined(mi.pubdate): pubdate = '' else: pubdate = strftime(u'%Y', mi.pubdate.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = mi.tags if mi.tags else alt_tags if tags: tags = output_profile.tags_to_string(tags) else: tags = '' comments = mi.comments if mi.comments else alt_comments comments = comments.strip() orig_comments = comments if comments: comments = comments_to_html(comments) try: author = mi.format_authors() except: author = '' def generate_html(comments): args = dict(xmlns=XHTML_NS, title_str=title_str, css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=_('Series'), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='') for key in mi.custom_field_keys(): try: display_name, val = mi.format_field_extended(key)[:2] key = key.replace('#', '_') args[key] = escape(val) args[key + '_label'] = escape(display_name) except: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" %s: %s" % ('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') generated_html = P('jacket/template.xhtml', data=True).decode('utf-8').format(**args) # Post-process the generated html to strip out empty header items soup = BeautifulSoup(generated_html) if not series: series_tag = soup.find(attrs={'class': 'cbj_series'}) if series_tag is not None: series_tag.extract() if not rating: rating_tag = soup.find(attrs={'class': 'cbj_rating'}) if rating_tag is not None: rating_tag.extract() if not tags: tags_tag = soup.find(attrs={'class': 'cbj_tags'}) if tags_tag is not None: tags_tag.extract() if not pubdate: pubdate_tag = soup.find(attrs={'class': 'cbj_pubdata'}) if pubdate_tag is not None: pubdate_tag.extract() if output_profile.short_name != 'kindle': hr_tag = soup.find('hr', attrs={'class': 'cbj_kindle_banner_hr'}) if hr_tag is not None: hr_tag.extract() return strip_encoding_declarations( soup.renderContents('utf-8').decode('utf-8')) from calibre.ebooks.oeb.base import RECOVER_PARSER try: root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER) except: try: root = etree.fromstring(generate_html(escape(orig_comments)), parser=RECOVER_PARSER) except: root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER) return root
def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[],
        alt_comments='', alt_publisher='', rescale_fonts=False, alt_authors=None):
    css = P('jacket/stylesheet.css', data=True).decode('utf-8')
    template = P('jacket/template.xhtml', data=True).decode('utf-8')

    template = re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL)
    css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL)

    try:
        title_str = alt_title if mi.is_null('title') else mi.title
    except:
        title_str = _('Unknown')
    title_str = escape(title_str)
    title = '<span class="title">%s</span>' % title_str

    series = Series(mi.series, mi.series_index)

    try:
        publisher = mi.publisher if not mi.is_null('publisher') else alt_publisher
    except:
        publisher = ''
    publisher = escape(publisher)

    try:
        if is_date_undefined(mi.pubdate):
            pubdate = ''
        else:
            dt = as_local_time(mi.pubdate)
            pubdate = strftime(u'%Y', dt.timetuple())
    except:
        pubdate = ''

    rating = get_rating(mi.rating, output_profile.ratings_char,
                        output_profile.empty_ratings_char)

    tags = Tags((mi.tags if mi.tags else alt_tags), output_profile)

    comments = mi.comments if mi.comments else alt_comments
    comments = comments.strip()
    orig_comments = comments
    if comments:
        comments = comments_to_html(comments)

    orig = mi.authors
    if mi.is_null('authors'):
        mi.authors = list(alt_authors or (_('Unknown'),))
    try:
        author = mi.format_authors()
    except:
        author = ''
    mi.authors = orig
    author = escape(author)

    has_data = {}

    def generate_html(comments):
        args = dict(xmlns=XHTML_NS,
                    title_str=title_str,
                    css=css,
                    title=title,
                    author=author,
                    publisher=publisher,
                    pubdate_label=_('Published'), pubdate=pubdate,
                    series_label=_('Series'), series=series,
                    rating_label=_('Rating'), rating=rating,
                    tags_label=_('Tags'), tags=tags,
                    comments=comments,
                    footer='',
                    searchable_tags=' '.join(escape(t)+'ttt' for t in tags.tags_list),
                    )
        for key in mi.custom_field_keys():
            m = mi.get_user_metadata(key, False) or {}
            try:
                display_name, val = mi.format_field_extended(key)[:2]
                dkey = key.replace('#', '_')
                dt = m.get('datatype')
                if dt == 'series':
                    args[dkey] = Series(mi.get(key), mi.get(key + '_index'))
                elif dt == 'rating':
                    args[dkey] = rating_to_stars(mi.get(key),
                            m.get('display', {}).get('allow_half_stars', False))
                elif dt == 'comments':
                    val = val or ''
                    display = m.get('display', {})
                    ctype = display.get('interpret_as') or 'html'
                    if ctype == 'long-text':
                        val = '<pre style="white-space:pre-wrap">%s</pre>' % escape(val)
                    elif ctype == 'short-text':
                        val = '<span>%s</span>' % escape(val)
                    elif ctype == 'markdown':
                        val = markdown(val)
                    else:
                        val = comments_to_html(val)
                    args[dkey] = val
                else:
                    args[dkey] = escape(val)
                args[dkey+'_label'] = escape(display_name)
            except Exception:
                # if the val (custom column contents) is None, don't add to args
                pass

        if False:
            print("Custom column values available in jacket template:")
            for key in args.keys():
                if key.startswith('_') and not key.endswith('_label'):
                    print(" %s: %s" % ('#' + key[1:], args[key]))

        # Used in the comment describing use of custom columns in templates
        # Don't change this unless you also change it in template.xhtml
        args['_genre_label'] = args.get('_genre_label', '{_genre_label}')
        args['_genre'] = args.get('_genre', '{_genre}')

        formatter = SafeFormatter()
        generated_html = formatter.format(template, **args)
        has_data['series'] = bool(series)
        has_data['tags'] = bool(tags)
        has_data['rating'] = bool(rating)
        has_data['pubdate'] = bool(pubdate)

        return strip_encoding_declarations(generated_html)

    from calibre.ebooks.oeb.base import RECOVER_PARSER
    try:
        root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER)
    except:
        try:
            root = etree.fromstring(generate_html(escape(orig_comments)),
                    parser=RECOVER_PARSER)
        except:
            root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER)

    if rescale_fonts:
        # We ensure that the conversion pipeline will set the font sizes for
        # text in the jacket to the same size as the font sizes for the rest of
        # the text in the book. That means that as long as the jacket uses
        # relative font sizes (em or %), the post conversion font size will be
        # the same as for text in the main book. So text with size x em will
        # be rescaled to the same value in both the jacket and the main content.
        #
        # We cannot use calibre_rescale_100 on the body tag as that will just
        # give the body tag a font size of 1em, which is useless.
        for body in root.xpath('//*[local-name()="body"]'):
            fw = body.makeelement(XHTML('div'))
            fw.set('class', 'calibre_rescale_100')
            for child in body:
                fw.append(child)
            body.append(fw)
    postprocess_jacket(root, output_profile, has_data)

    from calibre.ebooks.oeb.polish.pretty import pretty_html_tree
    pretty_html_tree(None, root)
    return root
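# A self-contained sketch of the font-rescaling wrapper applied above: every
# child of <body> is moved into a new <div class="calibre_rescale_100"> so that
# the conversion pipeline rescales jacket text together with the rest of the
# book. This stand-in uses plain, un-namespaced element names and is not
# calibre's pipeline code.
def _wrap_body_in_rescale_div(root):
    for body in root.xpath('//*[local-name()="body"]'):
        fw = body.makeelement('div')
        fw.set('class', 'calibre_rescale_100')
        for child in list(body):  # copy first; append() re-parents nodes out of body
            fw.append(child)
        body.append(fw)
    return root

# Example (illustrative input only):
#   from lxml import etree
#   root = etree.fromstring('<html><body><p>a</p><p>b</p></body></html>')
#   _wrap_body_in_rescale_div(root)
#   etree.tostring(root)
#   -> '<html><body><div class="calibre_rescale_100"><p>a</p><p>b</p></div></body></html>'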
def get_components(template, mi, id, timefmt='%b %Y', length=250,
                   sanitize_func=ascii_filename, replace_whitespace=False,
                   to_lowercase=False, safe_format=True):
    tsorder = tweaks['save_template_title_series_sorting']
    format_args = FORMAT_ARGS.copy()
    format_args.update(mi.all_non_none_fields())
    if mi.title:
        if tsorder == 'strictly_alphabetic':
            v = mi.title
        else:
            # title_sort might be missing or empty. Check both conditions
            v = mi.get('title_sort', None)
            if not v:
                v = title_sort(mi.title, order=tsorder)
        format_args['title'] = v
    if mi.authors:
        format_args['authors'] = mi.format_authors()
        format_args['author'] = format_args['authors']
    if mi.tags:
        format_args['tags'] = mi.format_tags()
        if format_args['tags'].startswith('/'):
            format_args['tags'] = format_args['tags'][1:]
    else:
        format_args['tags'] = ''
    if mi.series:
        format_args['series'] = title_sort(mi.series, order=tsorder)
        if mi.series_index is not None:
            format_args['series_index'] = mi.format_series_index()
    else:
        template = re.sub(r'\{series_index[^}]*?\}', '', template)
    if mi.rating is not None:
        format_args['rating'] = mi.format_rating(divide_by=2.0)
    if mi.identifiers:
        format_args['identifiers'] = mi.format_field_extended('identifiers')[1]
    else:
        format_args['identifiers'] = ''

    if hasattr(mi.timestamp, 'timetuple'):
        format_args['timestamp'] = strftime(timefmt, mi.timestamp.timetuple())
    if hasattr(mi.pubdate, 'timetuple'):
        format_args['pubdate'] = strftime(timefmt, mi.pubdate.timetuple())
    if hasattr(mi, 'last_modified') and hasattr(mi.last_modified, 'timetuple'):
        format_args['last_modified'] = strftime(timefmt, mi.last_modified.timetuple())

    format_args['id'] = str(id)
    # Now format the custom fields
    custom_metadata = mi.get_all_user_metadata(make_copy=False)
    for key in custom_metadata:
        if key in format_args:
            cm = custom_metadata[key]
            if cm['datatype'] == 'series':
                format_args[key] = title_sort(format_args[key], order=tsorder)
                if key+'_index' in format_args:
                    format_args[key+'_index'] = fmt_sidx(format_args[key+'_index'])
            elif cm['datatype'] == 'datetime':
                format_args[key] = strftime(timefmt, format_args[key].timetuple())
            elif cm['datatype'] == 'bool':
                format_args[key] = _('yes') if format_args[key] else _('no')
            elif cm['datatype'] == 'rating':
                format_args[key] = mi.format_rating(format_args[key],
                                                    divide_by=2.0)
            elif cm['datatype'] in ['int', 'float']:
                if format_args[key] != 0:
                    format_args[key] = unicode(format_args[key])
                else:
                    format_args[key] = ''
    if safe_format:
        components = Formatter().safe_format(template, format_args,
                                             'G_C-EXCEPTION!', mi)
    else:
        components = Formatter().unsafe_format(template, format_args, mi)
    components = [x.strip() for x in components.split('/')]
    components = [sanitize_func(x) for x in components if x]
    if not components:
        components = [str(id)]
    if to_lowercase:
        components = [x.lower() for x in components]
    if replace_whitespace:
        components = [re.sub(r'\s', '_', x) for x in components]

    return shorten_components_to(length, components)
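# A much-simplified sketch of what the save-template expansion above produces.
# Calibre's Formatter implements a full template language; this stand-in only
# handles plain {field} substitutions against a dict of format_args, then
# splits the result into path components the way get_components() does.
# The template string and field values in the example are assumptions.
import string as _string
from collections import defaultdict as _defaultdict

def _expand_template_sketch(template, format_args, sanitize_func=lambda x: x):
    # Unknown fields expand to an empty string instead of raising KeyError.
    expanded = _string.Formatter().vformat(
        template, (), _defaultdict(lambda: u'', format_args))
    parts = expanded.split('/')
    return [sanitize_func(p.strip()) for p in parts if p.strip()]

# Example:
#   _expand_template_sketch('{author_sort}/{title} - {authors}',
#                           {'author_sort': 'Austen, Jane',
#                            'title': 'Pride and Prejudice',
#                            'authors': 'Jane Austen'})
#   -> ['Austen, Jane', 'Pride and Prejudice - Jane Austen']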
def __init__(self, parent, db):
    QDialog.__init__(self, parent)
    self.setupUi(self)
    self.um_label.setText(self.um_label.text() % localize_user_manual_link(
        'https://manual.calibre-ebook.com/gui.html#the-search-interface'))

    for val, text in [(0, '')] + [(i, strftime('%B', date(2010, i, 1).timetuple()))
                                  for i in xrange(1, 13)]:
        self.date_month.addItem(text, val)
    for val, text in [('today', _('Today')), ('yesterday', _('Yesterday')),
                      ('thismonth', _('This month'))]:
        self.date_human.addItem(text, val)
    self.date_year.setValue(now().year)
    self.date_day.setSpecialValueText(u' \xa0')
    vals = [((v['search_terms'] or [k])[0], v['name'] or k)
            for k, v in db.field_metadata.iteritems()
            if v.get('datatype', None) == 'datetime']
    for k, v in sorted(vals, key=lambda (k, v): sort_key(v)):
        self.date_field.addItem(v, k)

    self.date_year.valueChanged.connect(
        lambda: self.sel_date.setChecked(True))
    self.date_month.currentIndexChanged.connect(
        lambda: self.sel_date.setChecked(True))
    self.date_day.valueChanged.connect(
        lambda: self.sel_date.setChecked(True))
    self.date_daysago.valueChanged.connect(
        lambda: self.sel_daysago.setChecked(True))
    self.date_ago_type.addItems(
        [_('days'), _('weeks'), _('months'), _('years')])
    self.date_human.currentIndexChanged.connect(
        lambda: self.sel_human.setChecked(True))
    init_dateop(self.dateop_date)
    self.sel_date.setChecked(True)
    self.mc = ''
    searchables = sorted(db.field_metadata.searchable_fields(),
                         key=lambda x: sort_key(x if x[0] != '#' else x[1:]))
    self.general_combo.addItems(searchables)

    all_authors = db.all_authors()
    all_authors.sort(key=lambda x: sort_key(x[1]))
    self.authors_box.setEditText('')
    self.authors_box.set_separator('&')
    self.authors_box.set_space_before_sep(True)
    self.authors_box.set_add_separator(
        tweaks['authors_completer_append_separator'])
    self.authors_box.update_items_cache(db.all_author_names())

    all_series = db.all_series()
    all_series.sort(key=lambda x: sort_key(x[1]))
    self.series_box.set_separator(None)
    self.series_box.update_items_cache([x[1] for x in all_series])
    self.series_box.show_initial_value('')

    all_tags = db.all_tags()
    self.tags_box.update_items_cache(all_tags)

    self.box_last_values = copy.deepcopy(box_values)
    if self.box_last_values:
        for k, v in self.box_last_values.items():
            if k == 'general_index':
                continue
            getattr(self, k).setText(v)
        self.general_combo.setCurrentIndex(
            self.general_combo.findText(
                self.box_last_values['general_index']))

    self.clear_button.clicked.connect(self.clear_button_pushed)

    current_tab = gprefs.get('advanced search dialog current tab', 0)
    self.tabWidget.setCurrentIndex(current_tab)
    if current_tab == 1:
        self.matchkind.setCurrentIndex(last_matchkind)
    self.tabWidget.currentChanged[int].connect(self.tab_changed)
    self.tab_changed(current_tab)
    self.resize(self.sizeHint())
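# The month combo above is filled with locale-aware month names by formatting a
# fixed date from 2010 with '%B'. A standalone sketch of the same idea using
# only the standard library (calibre routes this through its own strftime
# wrapper for unicode handling; that wrapper is not used here):
import time as _time
from datetime import date as _date

def _month_names():
    # ['January', 'February', ...] in the current locale
    return [_time.strftime('%B', _date(2010, m, 1).timetuple())
            for m in range(1, 13)]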