def serve(books, size, session):
    """Output an HTML gallery of coverpages for the given book ids.

    books   -- iterable of book ids (ints)
    size    -- cover filetype key selecting which cover file to use
    session -- SQLAlchemy session used for the queries

    Returns the gallery markup as UTF-8 encoded bytes.
    """
    cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
    cherrypy.response.headers['Content-Language'] = 'en'

    def author_name(author):
        # Hoisted out of the loop: it only depends on its argument.
        return DublinCore.DublinCore.make_pretty_name(author.name)

    s = ''
    for book_id in books:
        # Look up the cover file first: if there is no cover we can skip
        # the comparatively expensive DublinCore database load entirely.
        cover = session.execute(
            select(Models.File.archive_path).where(
                Models.File.fk_books == book_id,
                Models.File.fk_filetypes == size)).scalars().first()
        if not cover:
            continue

        dc = DublinCoreMapping.DublinCoreObject(session=session, pooled=True)
        dc.load_from_database(book_id)

        url = '/' + cover
        href = '/ebooks/%d' % book_id

        if dc.title:
            # gg.xmlspecialchars handles <, > and &; quotes must be escaped
            # separately because the title lands inside double-quoted HTML
            # attributes below.
            title = gg.xmlspecialchars(dc.title)
            title = title.replace('"', '&quot;')
            title = title.replace("'", '&apos;')
        else:
            title = '!! missing title !!'

        # Shortened title for display under the cover.
        short_title = dc.make_pretty_title()
        authors = ', '.join(author_name(author) for author in dc.authors)

        s += f"""
            <a href="{href}" title="{title}" authors="{authors}" target="_top">
                <div class="cover_image">
                    <div class="cover_img">
                        <img src="{url}" alt="{title}, {authors}" title="{title}" authors="{authors}" draggable="false">
                    </div>
                    <div class="cover_title">
                        <h5>{short_title}</h5>
                    </div>
                </div>
            </a>
        """
    return s.encode('utf-8')
def serve(rows, size):
    """Output a gallery of coverpages as a standalone XHTML page.

    rows -- iterable of records exposing .filename, .pk and .title
    size -- thumbnail size class name, e.g. 'small' or 'medium'

    Returns the complete page as UTF-8 encoded bytes.
    """
    cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
    cherrypy.response.headers['Content-Language'] = 'en'

    # Static page header with the thumbnail CSS classes.
    s = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">

<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en" xml:base="http://www.gutenberg.org">
<head>
<title>Cover Flow</title>
<style>
.cover-thumb {
  display: inline-block;
  background-position: center;
  background-repeat: no-repeat;
}
.cover-thumb-small {
  width: 76px;
  height: 110px;
}
.cover-thumb-medium {
  width: 210px;
  height: 310px;
}
</style>
</head>
<body><div>"""

    for row in rows:
        url = '/' + row.filename
        href = '/ebooks/%d' % row.pk
        # gg.xmlspecialchars handles <, > and &; the double quote must be
        # escaped separately because the title lands in a quoted attribute.
        title = gg.xmlspecialchars(row.title)
        title = title.replace('"', '&quot;')
        s += """<a href="{href}" title="{title}"
                   class="cover-thumb cover-thumb-{size}" target="_top"
                   style="background-image: url({url})"> </a>\n""".format(
            url=url, href=href, title=title, size=size)

    return (s + '</div></body></html>\n').encode('utf-8')
def serve(rows, size):
    """Output a gallery of coverpages (latest-covers variant).

    rows -- iterable of records exposing .filename, .pk and .title
    size -- thumbnail size key (accepted for interface compatibility;
            not interpolated into the markup)

    Returns the gallery markup as UTF-8 encoded bytes.
    """
    cherrypy.response.headers['Content-Type'] = 'text/html; charset=utf-8'
    cherrypy.response.headers['Content-Language'] = 'en'

    s = ''
    for row in rows:
        url = '/' + row.filename
        href = '/ebooks/%d' % row.pk

        if row.title:
            # gg.xmlspecialchars handles <, > and &; quotes must be escaped
            # separately because the title lands inside double-quoted HTML
            # attributes below.
            title = gg.xmlspecialchars(row.title)
            title = title.replace('"', '&quot;')
            title = title.replace("'", '&apos;')
        else:
            title = '!! missing title !!'

        # Shorten long titles for the latest-covers display: collapse runs
        # of hyphens, keep only the first line, and wrap to 80 columns.
        short_title = re.sub(r"\-+", " ", title)
        first_lines = short_title.splitlines()
        # Guard: splitlines() is empty for an empty/newline-only string.
        short_title = first_lines[0] if first_lines else ''
        if len(title) > 80:
            wrapped = textwrap.wrap(short_title, 80)
            # Guard: wrap() returns [] for whitespace-only input, so the
            # original unguarded wrapped[0] could raise IndexError.
            if wrapped:
                short_title = wrapped[0]

        s += """
            <a href="{href}" title="{title}" target="_top">
                <div class="cover_image">
                    <div class="cover_img">
                        <img src="{url}" alt="{title}" title="{title}" draggable="false">
                    </div>
                    <div class="cover_title">
                        <h5>{short_title}</h5>
                    </div>
                </div>
            </a>
        """.format(url=url, href=href, title=title, short_title=short_title)

    return s.encode('utf-8')
def parse(self):
    """ Parse the plain text.

    Try to find semantic units in the character soup: split the raw text
    into paragraphs separated by blank lines, run self.analyze (), and
    build an XHTML tree in self.xhtml with one element per paragraph
    (serialized by self.ship_out). Idempotent: returns immediately if
    self.xhtml has already been built.
    """

    debug("GutenbergTextParser.parse () ...")

    # Already parsed — nothing to do.
    if self.xhtml is not None:
        return

    text = self.unicode_content()
    # Strip restricted/forbidden character sequences, then escape
    # <, > and & so the text can be embedded in XML below.
    text = parsers.RE_RESTRICTED.sub('', text)
    text = gg.xmlspecialchars(text)

    lines = [line.rstrip() for line in text.splitlines()]
    # Sentinel empty line so the final paragraph is flushed inside the loop.
    lines.append("")
    del text

    # Group consecutive non-blank lines into Par objects, recording the
    # number of blank lines before (par.before) and after (par.after)
    # each paragraph.
    blanks = 0
    par = Par()

    for line in lines:
        if len(line) == 0:
            blanks += 1
        else:
            if blanks and par.lines:  # don't append empty pars
                par.after = blanks
                self.pars.append(par)
                # While inside the body (self.body == 1), track the largest
                # blank-line run seen — presumably used later to tell
                # section breaks from ordinary paragraph breaks.
                if self.body == 1:
                    self.max_blanks = max(blanks, self.max_blanks)
                par = Par()
                par.before = blanks
            blanks = 0
            par.lines.append(line)

    # Flush the last paragraph (if any lines were collected).
    par.after = blanks
    if par.lines:
        self.pars.append(par)

    lines = None

    self.analyze()

    # build xhtml tree
    em = parsers.em

    self.xhtml = em.html(
        em.head(
            em.title(' '),
            # pylint: disable=W0142
            em.meta(**{'http-equiv': 'Content-Style-Type',
                       'content': 'text/css'}),
            em.meta(**{'http-equiv': 'Content-Type',
                       'content': mt.xhtml + '; charset=utf-8'})),
        em.body())

    # Serialize each paragraph through ship_out () and append the parsed
    # element to the document body, separated by blank lines in the output.
    for body in xpath(self.xhtml, '//xhtml:body'):
        xhtmlparser = lxml.html.XHTMLParser()
        for par in self.pars:
            p = etree.fromstring(self.ship_out(par), xhtmlparser)
            p.tail = '\n\n'
            body.append(p)

    # Paragraph list is consumed; free it.
    self.pars = []