def gen_posts(p_list):
    """Render the posts index page and one full page per post.

    :param p_list: iterable of post objects with ``.id``, ``.title``,
        ``.date``, ``.summary`` and ``.content`` attributes
        (``content`` is an HTML string)

    Writes ``<base_path>/posts.html`` and
    ``<base_path>/posts/<id>/index.html`` for every post.
    """
    index = builder.HTML(
        builder.HEAD(*make_head()),
        builder.BODY(
            make_menu(),
            builder.DIV(
                builder.H1("Posts", builder.CLASS("section-title")),
                *make_short_posts(p_list),
                builder.CLASS("section")),
            style="background-color:#f7f7f7"))
    # BUG FIX: use a context manager so the output file is closed
    # deterministically — open(...) inside print(file=...) leaked the handle.
    with open(os.path.join(base_path, "posts.html"), "w") as fp:
        print(html.etree.tostring(index, pretty_print=True,
                                  method='html').decode("utf-8"), file=fp)
    for post in p_list:
        html_content = builder.DIV(
            builder.H1(post.title, builder.CLASS("full-post-title")),
            builder.DIV(post.date.strftime("%d %B %Y, %H:%M"),
                        builder.CLASS("full-post-date")),
            builder.DIV(html.fromstring(post.content),
                        builder.CLASS("full-post-content")),
            builder.CLASS("full-post-container"))
        page = builder.HTML(
            builder.HEAD(
                *make_head(),
                # table.js is only needed on full post pages.
                builder.SCRIPT("", src=base_url + "js/table.js"),
            ),
            builder.BODY(make_menu(), html_content))
        with open(os.path.join(base_path, "posts", post.id,
                               "index.html"), "w") as fp:
            print(html.etree.tostring(page, pretty_print=True,
                                      method='html').decode("utf-8"), file=fp)
def test_fixup_arch(self):
    """Replace /div/div[1]/h3 in the stored view arch and verify the tree.

    NOTE(review): the replacement is built as an H1 but is expected back
    as an H3 — replace_arch_section apparently keeps the tag of the node
    it replaces; confirm against the implementation.
    """
    replacement = h.H1("I am the greatest title alive!")
    result = self.registry('ir.ui.view').replace_arch_section(
        self.cr, self.uid, self.view_id,
        '/div/div[1]/h3', replacement)
    # Expected tree: only the first column's heading text changes;
    # everything else (lists, editable spans) is untouched.
    self.eq(
        result,
        h.DIV(
            h.DIV(h.H3("I am the greatest title alive!"),
                  h.UL(h.LI("Item 1"), h.LI("Item 2"), h.LI("Item 3"))),
            h.DIV(
                h.H3("Column 2"),
                h.UL(
                    h.LI("Item 1"),
                    h.LI(
                        h.SPAN(
                            "My Company",
                            attrs(model='res.company', id=1,
                                  field='name', type='char'))),
                    h.LI(
                        h.SPAN(
                            "+00 00 000 00 0 000",
                            attrs(model='res.company', id=1,
                                  field='phone', type='char')))))))
def process_jsonfiles(zipdir):
    """Process all JSON files in the resulting directory

    :param zipdir: zipdir name
    :type zipdir: Path | str
    :returns: the <body> element with one <div> per journal entry
    """
    body = gen_html()
    for jfile in listjsonfiles(str(zipdir)):
        content = load_jsonfile(jfile)
        # Create title
        div = E.DIV(E.H1(content.get("date_journal")))
        # Create date:
        div.append(E.H5(content.get("address")))
        # Create photos. BUG FIX: entries without a "photos" key return
        # None from .get(), which used to crash the iteration.
        divimg = E.DIV()
        for image in content.get('photos') or []:
            img = E.IMG(
                src=image,
                width="600",
            )
            divimg.append(img)
        div.append(divimg)
        # Create text: render the markdown body to an HTML fragment.
        text = content["text"] = markdown.markdown(content["text"])
        texthtml = fromstring(text)
        div.append(E.P(texthtml))
        body.append(div)
    return body
def parse_wiki(tree, title):
    """Summarize a wiki page section-by-section and write the result.

    Walks the direct children of the mw-parser-output div, accumulating
    paragraph text per section and emitting a two-sentence summary under
    each section heading. Writes summarized-roanoke.html.

    :param tree: lxml ElementTree of the fetched wiki page
    :param title: page title for the <title> tag and <h1> heading
    """
    root = tree.getroot()
    parser_div = root.xpath("//div[@class='mw-parser-output']")[0]
    headers = ["h1", "h2", "h3", "h4", "h5", "h6"]
    children = parser_div.getchildren()
    text = ""
    header = ""
    html = ""

    def flush():
        # Emit the summary for the section gathered so far (no-op when
        # no paragraph text has accumulated).
        nonlocal text, html
        if len(text) > 0:
            summary = summarize(text, limit=2)
            html += "<h2>" + header + "</h2><p>" + summary + "</p>"
            text = ""

    for child in children:
        if child.tag == "p":
            text += child.text_content().lstrip().rstrip()
        elif child.tag in headers:
            flush()
            # Strip trailing "[edit]"-style markers from the heading.
            header = child.text_content().split("[")[0]
            print(header)
    # BUG FIX: the final section used to be silently dropped because
    # nothing flushed the accumulated text after the loop.
    flush()
    # TODO - add style sheet
    # TODO - format text
    body_parts = [E.H1(E.CLASS("heading"), title)]
    if html:
        # fromstring() raises on an empty string, so only parse when we
        # actually produced summary markup.
        body_parts.append(lxml.html.fromstring(html))
    html_out = E.HTML(
        E.HEAD(E.TITLE(title)),
        E.BODY(*body_parts)
    )
    html_out.getroottree().write(file="summarized-roanoke.html",
                                 method="html")
def make_short_papers(p_list, count=None):
    """Build one short paper <div> card per paper.

    :param p_list: list of paper objects (.title, .authors, .date, .links)
    :param count: optional cap on the number of papers rendered
    :returns: list of lxml elements, one per paper
    """
    # BUG FIX: close authors.json deterministically instead of leaking
    # the file handle opened inline in json.load(open(...)).
    with open("authors.json") as fp:
        authors_dict = json.load(fp)

    def gen_author_link(a):
        # Link the author's name when we know their homepage.
        if a in authors_dict:
            return "<a href=\"" + authors_dict[a] + "\">" + a + "</a>"
        else:
            return a

    tag_list = []
    for paper in p_list[:count]:
        # Join authors as "A, B and C" (single author stays bare).
        authors = gen_author_link(paper.authors[0])
        for a in paper.authors[1:-1]:
            authors += ", " + gen_author_link(a)
        if len(paper.authors) > 1:
            authors += " and " + gen_author_link(paper.authors[-1])
        links = [builder.A(key, href=paper.links[key])
                 for key in paper.links]
        tag_list.append(
            builder.DIV(
                builder.H1(paper.title, builder.CLASS("paper-title")),
                builder.DIV(html.fromstring(authors),
                            builder.CLASS("paper-authors")),
                builder.DIV(paper.date.strftime("%d %b. %Y"),
                            builder.CLASS("paper-date")),
                builder.DIV(*links, builder.CLASS("paper-links")),
                builder.CLASS("paper-container")))
    return tag_list
def login_page_gen():
    """Render the admin login page and return it as an HTML byte string."""
    login_row = E.TR(
        E.TD('LOGIN'),
        E.TD(E.INPUT(type='text', name='login', value='')),
    )
    password_row = E.TR(
        E.TD('PASSWORD'),
        E.TD(E.INPUT(type='text', name='password', value='')),
    )
    # The form POSTs both fields to the login endpoint.
    form = E.FORM(
        E.CLASS("loginform"),
        E.TABLE(login_row, password_row),
        E.INPUT(type='submit', value='LOGIN'),
        method='POST',
        action='/admin/login',
    )
    head = E.HEAD(
        E.LINK(rel="stylesheet", href="/css/deeplight.css", type="text/css"),
        E.TITLE("Administration and moderation"),
    )
    body = E.BODY(
        E.H1(E.CLASS("heading"), "Farlight Engine Imageboard"),
        E.P(E.CLASS("loginmessage"), "You need to login"),
        form,
    )
    return lxml.html.tostring(E.HTML(head, body))
def password_change_menu():
    """Render the password-change form and return it as an HTML byte string."""
    # NOTE(review): the submit button label 'LOGIN' looks copy-pasted from
    # the login form — confirm whether it should say something like CHANGE.
    fields = (
        ('OLD PASSWORD', 'old_passwd'),
        ('NEW PASSWORD', 'new_passwd'),
        ('NEW PASSWORD AGAIN', 'new_passwd_again'),
    )
    rows = [
        E.TR(E.TD(label),
             E.TD(E.INPUT(type='password', name=name, value='')))
        for label, name in fields
    ]
    # Hidden fields route the POST to the password-change action.
    form = E.FORM(
        E.CLASS("loginform"),
        E.INPUT(type='hidden', name='action', value='change'),
        E.INPUT(type='hidden', name='instance', value='password'),
        E.TABLE(*rows),
        E.INPUT(type='submit', value='LOGIN'),
        method='POST',
        action='/admin',
    )
    page = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href="/css/deeplight.css",
                   type="text/css"),
            E.TITLE("Administration and moderation"),
        ),
        E.BODY(
            E.H1(E.CLASS("heading"), "Farlight Engine Imageboard"),
            E.P(E.CLASS("loginmessage"), "Change your password"),
            form,
        ),
    )
    return lxml.html.tostring(page)
def gen_papers(p_list):
    """Render the papers index page to <base_path>/papers.html.

    :param p_list: list of paper objects understood by make_short_papers()
    """
    index = builder.HTML(
        builder.HEAD(*make_head()),
        builder.BODY(
            make_menu(),
            builder.DIV(builder.H1("Papers", builder.CLASS("section-title")),
                        *make_short_papers(p_list),
                        builder.CLASS("section"))))
    # BUG FIX: context manager closes the output file — the original
    # open(...) inside print(file=...) leaked the handle.
    with open(os.path.join(base_path, "papers.html"), "w") as fp:
        print(html.etree.tostring(index, pretty_print=True,
                                  method='html').decode("utf-8"), file=fp)
def visit_Component(self, node):
    """Render a Component register map as a complete HTML document.

    Builds the main content by visiting the component's children, then
    derives a sidebar table of contents from the generated h2/h3
    headings.

    :param node: component node (.name, .width, .size, .description)
    :returns: an E.HTML element tree
    """
    inst = getattr(self, 'inst', None)
    if inst:
        title = 'Instance {} of {} Register Map'.format(inst, node.name)
    else:
        title = 'Base {} Register Map'.format(node.name)
    self.title = title

    # Create the main content by sweeping the tree.
    bc = E.DIV(id='breadcrumbs')
    try:
        if self.breadcrumbs is not None:
            bc.append(self.breadcrumbs)
    except AttributeError:
        pass
    ww = node.width // 8
    an = ((node.size - 1).bit_length() + 3) // 4
    with self.tempvars(wordwidth=ww, address_nibbles=an, hlev=2):
        nodes = ([E.H1(title, id='title'), bc] +
                 [E.P(d) for d in node.description] +
                 [c for c in self.visitchildren(node)] +
                 [self.footer(node)])
        contentnode = E.DIV(*nodes, id='content')

    # Add a table of contents sidebar. We'll assume that everything that
    # wants to be in the TOC is already a heading and just work from there.
    h2list = E.UL()
    # BUG FIX: initialize before the loop; previously an <h3> appearing
    # before any <h2> raised NameError because h3list was unbound.
    h3list = None
    for elem in contentnode.iter('h2', 'h3'):
        # (renamed from `id`, which shadowed the builtin)
        anchor = escape(elem.text)
        elem.attrib['id'] = anchor
        if elem.tag == 'h2':
            h2node = E.LI(E.A(elem.text, href='#' + anchor))
            h2list.append(h2node)
            h3list = None
        else:
            if h3list is None:
                # First h3 under the current h2: open a nested list.
                h3list = E.UL()
                h2list.append(h3list)
            h3list.append(E.LI(E.A(elem.text, href='#' + anchor)))

    # Put it all together.
    return E.HTML(
        E.HEAD(
            E.TITLE(title),
            E.LINK(rel='stylesheet', type='text/css',
                   href=htmlpathjoin(self.styledir, 'reg.css'))),
        E.BODY(
            E.DIV(E.DIV(E.P(E.A(title, href='#title')), h2list,
                        id='sidebar'),
                  contentnode, id='wrapper')),
    )
def generateHTML(city_info):
    """Build a minimal weather page for *city_info* and return it as str."""
    heading = "Weather for:{}".format(city_info['city'])
    # Order matters: temperature, forecast, min, max, observation time.
    detail_keys = ('city_temp', 'city_forecast', 'city_min',
                   'city_max', 'city_time')
    paragraphs = [E.P("{}".format(city_info[key])) for key in detail_keys]
    page = E.HTML(
        E.HEAD(E.TITLE(heading)),
        E.BODY(E.H1(heading), *paragraphs))
    return lxml.html.tostring(page).decode('utf-8')
def make_short_posts(p_list):
    """Return one teaser <div> element per post in *p_list*."""
    cards = []
    for post in p_list:
        permalink = base_url + "posts/" + post.id
        title = builder.H1(
            builder.A(post.title, href=permalink),
            builder.CLASS("post-title"))
        date = builder.DIV(post.date.strftime("%d %b. %Y, %H:%M"),
                           builder.CLASS("post-date"))
        cards.append(
            builder.DIV(
                title,
                # The summary is stored as an HTML string.
                html.fromstring(post.summary),
                date,
                builder.CLASS("post-container")))
    return cards
def getCity(city):
    """Fetch weather for *city* and return a rendered HTML page as str."""
    handler = weatherHandler(city)
    handler.get_weather()
    heading = "Weather for:{}".format(handler.city)
    # One paragraph per formatted reading, in display order.
    details = (handler.formatted_temp, handler.formatted_weather,
               handler.formatted_minMax, handler.formatted_time)
    page = E.HTML(
        E.HEAD(E.TITLE(heading)),
        E.BODY(E.H1(heading),
               *[E.P("{}".format(item)) for item in details]))
    return lxml.html.tostring(page).decode('utf-8')
def manifest_html(self):
    """
    This is manifest.html the human useable form of the manifest.xml
    special object to list needed criteria or return a manifest given a
    set of criteria
    """
    # NOTE(review): "Maninfest" in the page title is a typo in a
    # user-visible, translated string; left untouched here.
    web_page = \
        E.HTML(
            E.HEAD(
                E.TITLE(_("%s A/I Webserver -- "
                          "Maninfest Criteria Test") % _DISTRIBUTION)
            ),
            E.BODY(
                E.H1(_("Welcome to the %s A/I "
                       "webserver") % _DISTRIBUTION),
                E.H2(_("Manifest criteria tester")),
                # Document the expected criteria string format.
                E.P(_("To test a system's criteria, all "
                      "criteria listed are necessary. The "
                      "format used should be:"),
                    E.BR(),
                    E.TT("criteria1=value1;criteria2=value2"),
                    E.BR(),
                    _("For example:"),
                    E.BR(),
                    E.TT("arch=sun4u;mac=EEE0C0FFEE00;"
                         "ipv4=172020025012;"
                         "manufacturer=sun microsystems")
                    ),
                E.H1(_("Criteria:")),
                # List every criterion currently known to the database.
                E.P(str(list(AIdb.getCriteria(
                    self.AISQL.getQueue(), strip=True)))),
                # The form POSTs the criteria string to manifest.xml.
                E.FORM(E.INPUT(type="text", name="postData"),
                       E.INPUT(type="submit"),
                       action="manifest.xml",
                       method="POST"
                       )
            )
        )
    return lxml.etree.tostring(web_page, pretty_print=True)
def header(self):
    """Make the header bar of the webpage"""
    # E.E.header produces a literal <header> element (there is no
    # dedicated builder shortcut for HTML5 elements).
    return E.E.header(
        E.ATTR(id='header'),
        E.DIV(
            E.ATTR(id='title'),
            E.H1(
                E.A(
                    'GCC Python Plugin',
                    href='http://gcc-python-plugin.readthedocs.org/',
                ),
            ),
            E.DIV(
                E.ATTR(id='info'),
                E.SPAN(
                    E.CLASS('label'),
                    'Filename: ',
                ),
                self.data['filename'],
                E.SPAN(
                    E.CLASS('label'),
                    'Function: ',
                ),
                self.data['function']['name'],
            ),
            # One link per report so the pagination widget can jump to
            # the matching #state<i> anchor.
            E.DIV(
                E.ATTR(id='report-pagination'),
                E.SPAN(
                    E.CLASS('label'),
                    'Report: ',
                ),
                *(
                    E.A(str(i + 1), href="#state{0}".format(i + 1))
                    for i in range(len(self.data['reports']))
                )
            ),
            # Prev/next arrows; the images are inlined as data: URIs so
            # the report is self-contained.
            E.DIV(
                E.ATTR(id='prev'),
                E.IMG(
                    src=data_uri('image/png', 'images/arrow-180.png'),
                ),
            ),
            E.DIV(
                E.ATTR(id='next'),
                E.IMG(
                    src=data_uri('image/png', 'images/arrow.png'),
                ),
            ),
        ),
    )
def main(results_file):
    """Generate index.html summarizing a keystone performance run.

    :param results_file: path to the JSON results file produced by the
        perf job; index.html is written into the same directory.
    """
    with open(results_file, 'r') as f:
        summary = json.loads(f.read())

    summary['directory'] = os.path.dirname(results_file)
    date = datetime.datetime.fromtimestamp(
        summary['timestamp']).strftime('%Y-%m-%d-%H:%M:%S')
    token_create_rps = summary['token_creation']['requests_per_second']
    token_create_tps = summary['token_creation']['time_per_request']
    token_validate_rps = summary['token_validation']['requests_per_second']
    token_validate_tps = summary['token_validation']['time_per_request']

    index = e.HTML(
        e.HEAD(e.LINK(rel='stylesheet', type='text/css', href='theme.css'),
               e.TITLE('OpenStack Keystone Performance')),
        e.BODY(
            e.DIV(e.H1('OpenStack Keystone Performance'),
                  e.P('Published reports after each merged patch.',
                      CLASS('subtitle')),
                  id='header'),
            e.DIV(
                e.P('Last run date: ' + date, ),
                # Link both SHAs back to their source repositories.
                e.P(
                    'keystone SHA: ',
                    e.A(summary['sha'],
                        target='_blank',
                        href=KEYSTONE_LINK + summary['sha'])),
                e.P(
                    'os_keystone SHA: ',
                    e.A(summary['osa_sha'],
                        target='_blank',
                        href=OSA_LINK + summary['osa_sha'])),
                e.P(e.A('Performance Data',
                        href=PERF_LINK,
                        target='_blank')),
                e.DIV(CLASS('left'),
                      e.H2('Create Token'),
                      e.P(e.STRONG(token_create_rps),
                          ' requests per second'),
                      e.P(e.STRONG(token_create_tps), ' ms per request')),
                e.DIV(
                    CLASS('right'),
                    e.H2('Validate Token'),
                    e.P(e.STRONG(token_validate_rps),
                        ' requests per second'),
                    e.P(e.STRONG(token_validate_tps), ' ms per request')),
                id='content'),
            e.DIV(e.P(
                'Results provided by the ',
                e.A('OSIC Performance Bot',
                    target='_blank',
                    href=BOT_LINK)),
                id='footer')))

    with open(os.path.join(summary['directory'], 'index.html'), 'w') as f:
        # BUG FIX: et.tostring() returns bytes; decode before writing to a
        # text-mode file (Python 3 would raise TypeError otherwise).
        f.write(et.tostring(index).decode('utf-8'))
def preface_html(
    self, novel: Novel, urls: List[NovelUrl], metadata: List[MetaData]
) -> epub.EpubHtml:
    """Build the preface page: title, author, synopsis, metadata, sources.

    :param novel: novel record providing title/author/synopsis/lang
    :param urls: source URLs the novel was scraped from
    :param metadata: flat metadata items, grouped here by their ``.name``
    :returns: an EpubHtml page named preface.xhtml
    """
    # One <p> per synopsis line.
    synopsis_section = E.DIV(
        E.H4("Synopsis"),
        E.DIV(
            *[E.P(para) for para in novel.synopsis.splitlines()],
            style="padding: 0 1rem",
        ),
    )

    # Group metadata items by name so repeated names render as one
    # comma-separated row.
    meta_by_name: Dict[str, List[MetaData]] = {}
    for item in metadata:
        meta_by_name.setdefault(item.name, []).append(item)

    metadata_sections = []
    for name, items in meta_by_name.items():
        item_strings = [metadata_helper.display_value(item) for item in items]
        section = E.DIV(
            E.H4(name.capitalize()),
            E.DIV(
                ", ".join(item_strings),
                style="padding: 0 1rem",
            ),
        )
        metadata_sections.append(section)

    html = E.DIV(
        E.H1(novel.title, style="margin-bottom: 0;"),
        E.DIV(f"by: {novel.author}"),
        synopsis_section,
        *metadata_sections,
        # Each source URL doubles as its own link text.
        E.DIV(
            E.H4("Sources"),
            E.DIV(
                *[E.A(n_url.url, href=n_url.url) for n_url in urls],
                style="padding: 0 1rem",
            ),
        ),
    )

    return epub.EpubHtml(
        title="Preface",
        file_name="preface.xhtml",
        content=lxml.html.tostring(html),
        lang=novel.lang,
    )
def write_heading(self, text, bullet=None, autoAnchor=None, anchor=None,
                  level=1):
    """Serialize a section heading into the body buffer.

    When the 'docmapping' PI is enabled, levels 2/3/4+ map to
    <h2>/<h3>/<h4>; otherwise every heading is an <h1>.
    """
    factory = E.H1
    if self.pis['docmapping'] == 'yes':
        if level >= 4:
            factory = E.H4
        else:
            factory = {2: E.H2, 3: E.H3}.get(level, E.H1)
    h = factory()
    if autoAnchor:
        h.attrib['id'] = autoAnchor
    if bullet:
        # Bullet and text get separate <a> elements.
        a_bullet = E.A(bullet)
        a_bullet.tail = ' '
        if autoAnchor:
            a_bullet.attrib['href'] = '#' + autoAnchor
        h.append(a_bullet)
        if anchor:
            # The heading text links to the explicit anchor.
            a_text = E.A(text.strip(), href='#' + anchor)
            a_text.attrib["id"] = anchor
            h.append(a_text)
        else:
            # Plain text rides along as the bullet's tail.
            a_bullet.tail += text
    else:
        # No bullet: a single <a> wraps the whole heading text.
        a = E.A(text)
        if autoAnchor:
            a.attrib['href'] = '#' + autoAnchor
        h.append(a)
    # Add to body buffer
    self.buf.append(self._serialize(h))
def header(self):
    """Make the header bar of the webpage"""
    # E.E.header / E.E.nav emit literal <header>/<nav> elements (no
    # dedicated builder shortcuts exist for HTML5 elements).
    return E.E.header(
        E.ATTR(id='header'),
        E.DIV(
            E.ATTR(id='title'),
            E.H1('GCC Python Plugin', ),
            E.DIV(
                E.ATTR(id='filename'),
                E.SPAN(
                    E.CLASS('label'),
                    'Filename: ',
                ),
                self.data['filename'],
            ),
        ),
        E.E.nav(
            E.ATTR(id='nav'),
            E.DIV(
                E.ATTR(id='function'),
                E.H3('Function'),
                self.data['function']['name'],
            ),
            # One anchor per report for the pagination widget.
            E.DIV(
                E.ATTR(id='report-pagination'),
                E.H3('Report'),
                *(E.A(str(i + 1), href="#state{0}".format(i + 1))
                  for i in range(len(self.data['reports'])))),
            E.DIV(
                E.ATTR(id='bug-toggle'),
                E.IMG(src='images/bug.png', ),
                E.H3('Bug'),
                # placeholder text, presumably replaced client-side —
                # TODO confirm against the page's JS
                ' [count]',
            ),
            E.DIV(
                E.ATTR(id='prev'),
                E.IMG(src='images/arrow-180.png', ),
            ),
            E.DIV(
                E.ATTR(id='next'),
                E.IMG(src='images/arrow.png', ),
            ),
        ),
    )
def visit_MemoryMap(self, node):
    """Create an HTML file for a MemoryMap.

    Builds a peripheral table from the node's children, then derives a
    sidebar list of links from the peripheral <td> cells.

    :param node: memory-map node (.name, .size, .base, .description)
    :returns: an E.HTML element tree
    """
    self.title = title = node.name + ' Peripheral Map'
    an = ((node.size - 1).bit_length() + 3) // 4

    # Sweep the document tree to build up the main content
    with self.tempvars(wordwidth=1, address_nibbles=an, base=node.base,
                       subdir=node.name + '_instances', hlev=2):
        children = list(self.visitchildren(node))
        table = E.TABLE(
            E.TR(E.TH('Peripheral'), E.TH('Base Address'), E.TH('Size'),
                 E.TH('Description'), *children),
            CLASS('component_list'))
        nodes = ([E.H1(title, id='title')] +
                 [E.P(d) for d in node.description] +
                 [E.HR(), table, self.footer(node)])
        contentnode = E.DIV(*nodes, id='content')

    # Add a table of contents sidebar for each table row.
    instlist = E.UL()
    for elem in contentnode.xpath("//td[contains(@class, 'peripheral')]"):
        text = tostring(elem, method="text", encoding="unicode")
        # Locals renamed: `id` shadowed the builtin and `node` shadowed
        # (and clobbered) the function parameter.
        anchor = escape(text)
        elem.attrib['id'] = anchor
        item = E.LI(E.A(text, href='#' + anchor))
        instlist.append(item)

    # And put it all together.
    return E.HTML(
        E.HEAD(
            E.TITLE(title),
            E.LINK(rel='stylesheet', type='text/css',
                   href=htmlpathjoin(self.styledir, 'reg.css'))),
        E.BODY(
            E.DIV(E.DIV(E.P(E.A(title, href='#title')), instlist,
                        id='sidebar'),
                  contentnode, id='wrapper')),
    )
def _parse_chapter(self, url):
    """Yield the chapter at *url* as a stream of lxml elements."""
    self.info('Parsing chapter: %s', url)
    doc = self.fetch(url)
    # The page title looks like "Chapter Name | Site"; keep the last part.
    page_title, = doc.xpath('//title/text()')
    yield E.H1(page_title.split(' | ')[-1].strip())
    for tag in doc.get_element_by_id('content'):
        if tag.tag == 'h3':
            # Start of a section.
            yield E.H2(tag.text_content().strip())
        elif 'grafset' in tag.classes:
            # Section content: yield the children directly.
            yield from tag
        elif 'endnote' in tag.classes:
            # End note — change tag into a <blockquote class="endnote">.
            tag.tag = 'blockquote'
            yield tag
def parse(self, url):
    """Yield the story at *url*, following chapter links when present.

    Handles three layouts: a tabbed chapter index (wordpress-post-tabs),
    a flat list of "Chapter ..." links, or a single-page story.
    """
    doc = self.fetch(url)
    title, = doc.xpath('//meta[@property="og:title"]/@content')
    self.metadata['title'] = title.strip()
    self.metadata['author'] = 'Keira Markos'
    content, = doc.find_class('entry-content')
    index_div = content.find_class('wordpress-post-tabs')
    chapter_links = content.xpath('.//a[contains(text(), "Chapter")]')
    if index_div:
        # Tabs
        # FIXME: a couple stories actually put the whole text of the story
        # in wordpress-post-tabs, not just links to chapters
        chapter_links = index_div[0].xpath(
            './/div[starts-with(@id, "tabs-")][1]//a')
        # Drop the entire tags <div>
        index_div[0].drop_tree()
    elif chapter_links:
        # Links to chapters: strip them out of the intro content so they
        # aren't emitted twice.
        for link in chapter_links:
            # This also kills the tail text, but that is actually desirable
            link.getparent().remove(link)
    else:
        # Single chapter story
        yield from self._parse_chapter(url, strip_authors_note=False)
        return
    # Emit the remaining intro content, then each linked chapter with
    # its own H1 heading.
    yield from content
    for link in chapter_links:
        yield E.H1(link.text_content())
        yield from self._parse_chapter(link.get('href'))
def parse(self, url):
    """Yield Worm's table of contents as H1 arc titles plus chapters."""
    doc = self.fetch(self.url)
    self.metadata['title'] = 'Worm'
    self.metadata['author'] = 'Wildbow'
    categories = doc.get_element_by_id('categories-2')
    # Iterate over each arc <ul> in the table of contents; the arc title
    # lives in the preceding <a>.
    for arc in categories.xpath('.//ul[not(li/ul)]'):
        # Arc 10 has a leading soft hyphen, so clean that up too.
        title = arc.getprevious().text.replace('\u00ad', '').strip()
        # Skip chapters that are included in Ward
        if title == 'Stories (Pre-Worm 2)':
            continue
        yield E.H1(title)
        for chapter in arc.iter('a'):
            yield from self._parse_chapter(chapter.get('href'))
def list_manifests(service):
    '''Replies to the client with criteria list for a service.

    The output should be similar to installadm list.

    Args
        service - the name of the service being listed

    Returns
        None

    Raises
        None

    '''
    # Python 2 CGI script: emit headers, then the HTML page body.
    print 'Content-Type: text/html'     # HTML is following
    print                               # blank line, end of headers
    print '<html>'
    print '<head>'
    sys.stdout.write('<title>%s %s</title>' %
                     (_('Manifest list for'), service))
    print '</head><body>'
    port = 0
    try:
        smf.AISCF(FMRI="system/install/server")
    except KeyError:
        # report the internal error to error_log and requesting client
        sys.stderr.write(_("error:The system does not have the "
                           "system/install/server SMF service."))
        sys.stdout.write(_("error:The system does not have the "
                           "system/install/server SMF service."))
        return
    services = config.get_all_service_names()
    if not services:
        # report the error to the requesting client only
        sys.stdout.write(_('error:no services on this server.\n'))
        return
    found = False
    if config.is_service(service):
        service_ctrl = AIService(service)
        found = True
        # assume new service setup
        path = service_ctrl.database_path
        if os.path.exists(path):
            try:
                aisql = AIdb.DB(path)
                aisql.verifyDBStructure()
            except StandardError as err:
                # report the internal error to error_log and
                # requesting client
                sys.stderr.write(_('error:AI database access '
                                   'error\n%s\n') % err)
                sys.stdout.write(_('error:AI database access '
                                   'error\n%s\n') % err)
                return
            # generate the list of criteria for the criteria table header
            criteria_header = E.TR()
            for crit in AIdb.getCriteria(aisql.getQueue(), strip=False):
                criteria_header.append(E.TH(crit))
            # generate the manifest rows for the criteria table body
            names = AIdb.getManNames(aisql.getQueue())
            table_body = E.TR()
            allcrit = AIdb.getCriteria(aisql.getQueue(), strip=False)
            colspan = str(max(len(list(allcrit)), 1))
            for manifest in names:
                # iterate through each manifest (and instance)
                for instance in range(
                        0, AIdb.numInstances(manifest, aisql.getQueue())):
                    table_body.append(E.TR())
                    # print the manifest name only once (from instance 0)
                    if instance == 0:
                        href = '../' + service + '/' + manifest
                        row = str(AIdb.numInstances(manifest,
                                                    aisql.getQueue()))
                        table_body.append(
                            E.TD(E.A(manifest, href=href, rowspan=row)))
                    else:
                        table_body.append(E.TD())
                    crit_pairs = AIdb.getManifestCriteria(
                        manifest, instance, aisql.getQueue(),
                        onlyUsed=True, humanOutput=True)
                    # crit_pairs is an SQLite3 row object which doesn't
                    # support iteritems(), etc.
                    for crit in crit_pairs.keys():
                        formatted_val = AIdb.formatValue(
                            crit, crit_pairs[crit])
                        # if we do not get back a valid value ensure a
                        # hyphen is printed (prevents "" from printing)
                        if formatted_val and crit_pairs[crit]:
                            table_body.append(
                                E.TD(formatted_val, align="center"))
                        else:
                            table_body.append(
                                E.TD(lxml.etree.Entity("nbsp"),
                                     align="center"))
            # print the default manifest at the end of the table,
            # which has the same colspan as the Criteria List label
            # (for/else: runs after the manifest loop completes)
            else:
                href = '../' + service + '/default.xml'
                table_body.append(
                    E.TR(
                        E.TD(E.A("Default", href=href)),
                        E.TD(lxml.etree.Entity("nbsp"),
                             colspan=colspan,
                             align="center")))
            web_page = E.HTML(
                E.HEAD(E.TITLE(_("OmniOS Automated "
                                 "Installation Webserver"))),
                E.BODY(
                    E.H1(
                        _("Welcome to the OmniOS "
                          "Automated Installation webserver!")),
                    E.P(
                        _("Service '%s' has the following "
                          "manifests available, served to clients "
                          "matching required criteria.") % service),
                    E.TABLE(E.TR(E.TH(_("Manifest"), rowspan="2"),
                                 E.TH(_("Criteria List"),
                                      colspan=colspan)),
                            criteria_header,
                            table_body, border="1", align="center"),
                ))
            print lxml.etree.tostring(web_page, pretty_print=True)
    # service is not found, provide available services on host
    if not found:
        # NOTE(review): the source text here was mangled by extraction
        # (a literal "&#10;" entity inside the string); reconstructed
        # as a newline — confirm against the original.
        sys.stdout.write(_('Service <i>%s</i> not found. \n') % service)
        sys.stdout.write(_('Available services are:<p><ol><i>'))
        host = socket.gethostname()
        for service_name in config.get_all_service_names():
            # assume new service setup
            port = config.get_service_port(service_name)
            sys.stdout.write(
                '<a href="http://%s:%d/cgi-bin/'
                'cgi_get_manifest.py?version=%s&service=%s">%s</a><br>\n' %
                (host, port, VERSION, service_name, service_name))
        sys.stdout.write('</i></ol>%s' % _('Please select a service '
                                           'from the above list.'))
    print '</body></html>'
"Databases": "Databases (Custom)", "Design": "Design (Custom)", "General": "General (Custom)", "Integration": "Integration (Custom)", "Misc1": "Misc 1 (Custom)", "Misc2": "Misc 2 (Custom)", "Revisions": "Revisions (Custom)", "Shapes": "Shapes (Custom)", "Structure": "Structure (Custom)" } css = """\ img { border: 1px black solid } * { font-family: Arial; } """ paths = glob.glob("d:/Inetpub/wwwroot/images/xmetal/*.jpg") content = B.CENTER(B.H1("XMetaL CDR Icons")) for path in sorted(paths): if "xmetal_cdr" not in path and "Standard" not in path: name = path.replace("\\", "/").split("/")[-1][:-4] content.append(B.H2(names.get(name, name))) content.append(B.IMG(src="/images/xmetal/%s.jpg" % name)) page = B.HTML( B.HEAD( B.TITLE("XMetaL CDR Icons"), B.STYLE(css) ), B.BODY(content) ) xml = etree.tostring(page, pretty_print=True, encoding="unicode") print(f"Content-type: text/html\n\n{xml}")
termin["studiengang"] = studiengaenge[counter] termin["id"] = id id += 1 tabelle.append(termin) #if "s_termin_typ" in termin: # print(" " + termin["s_termin_typ"]) # print(" " + termin["s_termin_von"] + " - " + termin["s_termin_bis"]) counter += 1 # Alle Module eingeladen, hoffe ich htmlfile = E.HTML( E.HEAD(E.LINK(rel="stylesheet", href="plan.css", type="text/css"), E.SCRIPT(src="plan.js", type="text/javascript"), E.META(charset="utf-8"), E.TITLE("Test")), E.BODY(E.H1("Stundenplan"))) document = htmlfile.find("body") for studiengang in studiengaenge: print(studiengang) document.append(E.H2(studiengang, name=studiengang)) container = E.DIV(E.CLASS("plancontainer")) # E.DIV(E.CLASS("plancontainer")) for stunde in range(59): style = "top: " + str(2 + (100 / 60) * stunde) + "%; " mnt = str(int((stunde + 2) % 4 * 15)) if mnt == "0": mnt = "00" container.append(
print "Hello, Winnipeg crime!" import scraperwiki html = scraperwiki.scrape("http://www.winnipeg.ca/police/press/2012/01jan/2012_01_31.stm") print html import lxml.html root = lxml.html.fromstring(html) from lxml.html import builder as E from lxml.html import usedoctest html = E.HTML( E.HEAD( E.LINK(rel="stylesheet", href="great.css", type="text/css"), E.TITLE("Best Page Ever") E.BODY( E.H1(E.CLASS("heading"), "Top News"), E.P("World News only on this page", style="font-size: 200%"), "Ah, and here's some more text, by the way.", lxml.html.fromstring("<p>... and this is a parsed fragment ...</p>") ..) ..) >>> print lxml.html.tostring(html) <html> <head> <link href="great.css" rel="stylesheet" type="text/css"> <title>Best Page Ever</title> </head> <body> <h1 class="heading">Top News</h1> <p style="font-size: 200%">World News only on this page</p> Ah, and here's some more text, by the way. <p>... and this is a parsed fragment ...</p>
def index(self):
    """ The server's main page """
    # generate the list of criteria for the criteria table header
    criteriaHeader = E.TR()
    for crit in AIdb.getCriteria(self.AISQL.getQueue(), strip=False):
        criteriaHeader.append(E.TH(crit))
    # generate the manifest rows for the criteria table body
    names = AIdb.getManNames(self.AISQL.getQueue())
    tableBody = E.TR()
    for manifest in names:
        # iterate through each manifest (and instance)
        for instance in range(0, AIdb.numInstances(manifest,
                                                   self.AISQL.getQueue())):
            tableBody.append(E.TR())
            # print the manifest name only once (key off instance 0)
            if instance == 0:
                tableBody.append(
                    E.TD(E.A(manifest,
                             href="/manifests/" + manifest,
                             rowspan=str(AIdb.numInstances(manifest,
                                         self.AISQL.getQueue()))
                             )
                         )
                )
            else:
                tableBody.append(E.TD())
            critPairs = AIdb.getManifestCriteria(manifest, instance,
                                                 self.AISQL.getQueue(),
                                                 onlyUsed=True,
                                                 humanOutput=True)
            # critPairs is an SQLite3 row object which doesn't support
            # iteritems(), etc.
            for crit in critPairs.keys():
                formatted_val = AIdb.formatValue(crit, critPairs[crit])
                # if we do not get back a valid value ensure a hyphen is
                # printed (this prevents "" from printing)
                if formatted_val and critPairs[crit]:
                    tableBody.append(E.TD(formatted_val, align="center"))
                else:
                    tableBody.append(E.TD(lxml.etree.Entity("nbsp"),
                                          align="center"))
    # print the default manifest at the end of the table
    # (for/else: runs after the manifest loop completes)
    else:
        tableBody.append(
            E.TR(
                E.TD(
                    E.A("Default", href="/manifests/default.xml")),
                E.TD(lxml.etree.Entity("nbsp"),
                     colspan=str(max(len(list(
                         AIdb.getCriteria(self.AISQL.getQueue(),
                                          strip=False))), 1)),
                     align="center")
            )
        )
    web_page = \
        E.HTML(
            E.HEAD(
                E.TITLE(_("%s A/I Webserver") % _DISTRIBUTION)
            ),
            E.BODY(
                E.H1(_("Welcome to the %s A/I "
                       "webserver!") % _DISTRIBUTION),
                E.P(_("This server has the following "
                      "manifests available, served to clients "
                      "matching required criteria.")),
                E.TABLE(
                    E.TR(
                        E.TH(_("Manifest"), rowspan="2"),
                        E.TH(_("Criteria List"),
                             colspan=str(max(len(list(
                                 AIdb.getCriteria(self.AISQL.getQueue(),
                                                  strip=False))), 1)))
                    ),
                    criteriaHeader,
                    tableBody, border="1", align="center"
                ),
            )
        )
    return lxml.etree.tostring(web_page, pretty_print=True)
else: old_docs = old.docs[name] items = [] for key in old_docs.docs: old_id, old_title, old_xml = old_docs.docs[key] if key not in new_docs.docs: items.append(builder.I(builder.LI(old_title))) else: diffs = diff_xml(old_xml, new_docs.docs[key][2], verbose) if diffs is not None: title = builder.B(old_title) items.append(builder.LI(title, diffs)) if not items: body.append(builder.P(CHECK, OK)) else: body.append(builder.UL(*items)) parser = ArgumentParser() parser.add_argument("--old", required=True) parser.add_argument("--new", default=db.connect(user="******").cursor()) parser.add_argument("--verbose", action="store_true") opts = parser.parse_args() old = Data(opts.old) new = Data(opts.new, old) body = builder.BODY(builder.H1(TITLE)) compare_tables(body, old, new) compare_docs(body, old, new, opts.verbose) report = builder.HTML(HEAD, body) print(html.tostring(report, pretty_print=True).decode("ascii"))
def convert(title, text, rtl, server, articlepath, args):
    """Clean up a scraped MediaWiki article and wrap it for output.

    :param title: article title (used for the injected <h1> link)
    :param text: raw article HTML
    :param rtl: wrap the article in a right-to-left <div> when true
    :param server: wiki server base URL (may be falsy)
    :param articlepath: wiki article path prefix (may be falsy)
    :param args: parsed options (html_encoding, remove_embedded_bg,
        ensure_ext_image_urls); may be None
    :returns: the converted article as bytes in args.html_encoding
    """
    encoding = args.html_encoding if args else 'utf-8'
    text = NEWLINE_RE.sub('\n', text)
    doc = lxml.html.fromstring(text)
    CLEANER(doc)
    # Geo microformat conversion is best-effort; a failure in one
    # converter must not abort the whole article.
    for convert_geo in (convert_geo_microformat, convert_geo_microformat2,
                        convert_geo_microformat3):
        try:
            convert_geo(doc)
        except Exception:
            log.exception('Failed to convert geo')

    # Remove unwanted subtrees wholesale (string selectors are compiled
    # lazily here)...
    for selector in SELECTORS:
        if isinstance(selector, str):
            selector = CSSSelector(selector)
        for item in selector(doc):
            item.drop_tree()

    # ...and unwrap these anchors, keeping their text content.
    for item in SEL_A_IPA(doc):
        item.drop_tag()

    for item in SEL_A_NEW(doc):
        item.drop_tag()

    # Strip inline background styling on elements the user asked about.
    for sel_element_with_style in selector_list(args.remove_embedded_bg):
        for item in sel_element_with_style(doc):
            style = item.attrib['style']
            try:
                ss = cssutils.parseStyle(style)
            except Exception:
                log.exception('Failed to parse style attr with value %r',
                              style)
            else:
                ss.backgroundColor = None
                ss.background = None
                item.attrib['style'] = ss.cssText

    # Rewrite link and image URLs relative to the source wiki.
    for item in SEL_HREF(doc):
        item.attrib['href'] = convert_url(
            item.attrib['href'],
            server=server, articlepath=articlepath,
            ensure_ext_image_urls=args.ensure_ext_image_urls)

    for item in SEL_SRC(doc):
        item.attrib['src'] = convert_url(
            item.attrib['src'],
            server=server, articlepath=articlepath,
            ensure_ext_image_urls=args.ensure_ext_image_urls)
        if 'srcset' in item.attrib:
            srcset = item.attrib['srcset']
            if srcset:
                item.attrib['srcset'] = convert_srcset(
                    srcset,
                    server=server, articlepath=articlepath,
                    ensure_ext_image_urls=args.ensure_ext_image_urls)

    # When MathJax will render the formulas, drop the fallback images.
    has_math = len(SEL_MATH(doc)) > 0
    if has_math:
        for item in SEL_IMG_TEX(doc):
            item.attrib.pop('srcset', None)
            item.attrib.pop('src', None)

    if server and articlepath:
        # Turn the title heading into a "view online" link back to the
        # source article.
        article_url = ''.join((server, articlepath, quote(title)))
        a = E.A(id="view-online-link", href=article_url)
        title_heading = doc.cssselect('h1')
        if len(title_heading) > 0:
            title_heading = title_heading[0]
            if title_heading.text:
                a.text = title_heading.text
                title_heading.text = ''
                title_heading.append(a)
        else:
            # No <h1> present: synthesize one at the top of the document.
            a.text = title
            title_heading = E.H1()
            title_heading.append(a)
            body = doc.find('body')
            if not body is None:
                body.insert(0, title_heading)
            else:
                doc.insert(0, title_heading)

    if has_math:
        math_jax = ('<script src="~/js/jquery-2.1.3.min.js"></script>'
                    '<script src="~/MathJax/MathJax.js"></script>'
                    '<script src="~/MathJax/MediaWiki.js"></script>')
    else:
        math_jax = ''

    # Prepend the shared assets and optionally wrap in an RTL container;
    # "~/" paths are resolved by the consuming application.
    result = ''.join((
        '<script src="~/js/styleswitcher.js"></script>',
        '<link rel="stylesheet" href="~/css/shared.css" type="text/css">',
        '<link rel="stylesheet" href="~/css/mediawiki_shared.css" type="text/css">',
        '<link rel="stylesheet" href="~/css/mediawiki_monobook.css" type="text/css">',
        '<link rel="alternate stylesheet" href="~/css/night.css" type="text/css" title="Night">',
        math_jax,
        '<div dir="rtl" class="rtl">' if rtl else '',
        lxml.html.tostring(doc, encoding='unicode'),
        '</div>' if rtl else '',
    )).encode(encoding)
    return result
def full_html_from_doc(doc):
    """Wrap a readability document's summary in a complete, styled page."""
    # The summary is an HTML fragment; parse it back into an element.
    article_element = fragment_fromstring(doc.summary().html)
    page = B.HTML(
        B.HEAD(
            B.TITLE(doc.title()),
            B.STYLE(DISPLAY_CSS, type='text/css'),
        ),
        B.BODY(
            B.H1(doc.title(), {'class': 'articleTitle'}),
            article_element,
        ),
    )
    return tostring(page)