def gen_posts(p_list):
    """Render the posts index page and one standalone page per post.

    :param p_list: iterable of post objects exposing ``title``, ``date``,
        ``content`` and ``id`` attributes.

    Writes ``posts.html`` and ``posts/<id>/index.html`` under the
    module-level ``base_path``.
    """
    index = builder.HTML(
        builder.HEAD(*make_head()),
        builder.BODY(
            make_menu(),
            builder.DIV(
                builder.H1("Posts", builder.CLASS("section-title")),
                *make_short_posts(p_list),
                builder.CLASS("section")),
            style="background-color:#f7f7f7"))
    # Use a context manager so the handle is closed deterministically
    # (the original `file=open(...)` leaked it).
    with open(os.path.join(base_path, "posts.html"), "w") as out:
        print(html.etree.tostring(index, pretty_print=True,
                                  method='html').decode("utf-8"), file=out)
    for post in p_list:
        html_content = builder.DIV(
            builder.H1(post.title, builder.CLASS("full-post-title")),
            builder.DIV(post.date.strftime("%d %B %Y, %H:%M"),
                        builder.CLASS("full-post-date")),
            builder.DIV(html.fromstring(post.content),
                        builder.CLASS("full-post-content")),
            builder.CLASS("full-post-container"))
        page = builder.HTML(
            builder.HEAD(
                *make_head(),
                builder.SCRIPT("", src=base_url + "js/table.js"),
            ),
            builder.BODY(make_menu(), html_content))
        with open(os.path.join(base_path, "posts", post.id,
                               "index.html"), "w") as out:
            print(html.etree.tostring(page, pretty_print=True,
                                      method='html').decode("utf-8"),
                  file=out)
def run_local(self, url):
    """Analyze a local file with Thug.

    Reads the file as bytes, detects its encoding with cchardet, and
    wraps bare JavaScript (.js/.jse) in a minimal HTML scaffold before
    running the analysis window over it.
    """
    log.last_url = None
    log.last_url_fetched = None
    log.ThugLogging.set_url(url)
    log.ThugOpts.local = True
    log.HTTPSession = HTTPSession()
    # Read bytes and decode once up front.  The original opened the file
    # in text mode and then called .decode() on a str (AttributeError on
    # Python 3, and cchardet.detect expects bytes); it also leaked the
    # file handle.
    with open(url, 'rb') as f:
        raw = f.read()
    encoding = cchardet.detect(raw)
    content = raw.decode(encoding['encoding'] or 'utf-8')
    extension = os.path.splitext(url)
    # os.path.splitext always returns a 2-tuple, so indexing [1] is safe.
    if extension[1].lower() in ('.js', '.jse', ):
        if not content.lstrip().startswith('<script'):
            # Bare script source: wrap it in <html><body><script>.
            html = tostring(E.HTML(E.HEAD(), E.BODY(E.SCRIPT(content))))
        else:
            # Already wrapped in <script> (possibly with stray html/head/
            # body tags): strip the wrappers and re-embed the script text.
            soup = BeautifulSoup(content, "html.parser")
            for tagname in ('html', 'head', 'body'):
                try:
                    getattr(soup, tagname).unwrap()
                except AttributeError:
                    pass
            html = tostring(
                E.HTML(E.HEAD(),
                       E.BODY(E.SCRIPT(soup.script.get_text()))))
    else:
        html = content
    if log.ThugOpts.features_logging:
        log.ThugLogging.Features.add_characters_count(len(html))
        # html may be bytes (tostring returns bytes); iterating bytes
        # yields ints, which have no .isspace() — count str chars only.
        log.ThugLogging.Features.add_whitespaces_count(
            len([a for a in html if isinstance(a, str) and a.isspace()]))
    doc = w3c.parseString(html)
    window = Window('about:blank', doc, personality=log.ThugOpts.useragent)
    window.open()
    self.__run(window)
def run_local(self, url):
    """Analyze a local file with Thug (UTF-8 text variant).

    Same flow as the byte-oriented ``run_local``: wraps bare JavaScript
    (.js/.jse) in a minimal HTML scaffold, then runs the analysis window.
    """
    log.last_url = None
    log.last_url_fetched = None
    log.ThugLogging.set_url(url)
    log.ThugOpts.local = True
    log.HTTPSession = HTTPSession()
    # Context manager closes the handle (the original leaked it).
    with open(url, 'r', encoding="utf-8") as f:
        content = f.read()
    extension = os.path.splitext(url)
    # os.path.splitext always returns a 2-tuple, so indexing [1] is safe.
    if extension[1].lower() in ('.js', '.jse', ):
        if not content.lstrip().startswith('<script'):
            # Bare script source: wrap it in <html><body><script>.
            html = tostring(E.HTML(E.HEAD(), E.BODY(E.SCRIPT(content)))) 
        else:
            # Strip stray html/head/body wrappers before re-embedding.
            soup = bs4.BeautifulSoup(content, "html.parser")
            for tagname in ('html', 'head', 'body'):
                try:
                    getattr(soup, tagname).unwrap()
                except AttributeError:
                    pass
            # Collect script text including CDATA/Script string nodes.
            code = soup.script.get_text(
                types=(NavigableString, CData, Script))
            html = tostring(E.HTML(E.HEAD(), E.BODY(E.SCRIPT(code))))
    else:
        html = content
    if log.ThugOpts.features_logging:
        log.ThugLogging.Features.add_characters_count(len(html))
        # html may be bytes (tostring) — count whitespace only when the
        # iterated items are str characters.
        whitespaces_count = len([
            a for a in html
            if isinstance(a, six.string_types) and a.isspace()
        ])
        log.ThugLogging.Features.add_whitespaces_count(whitespaces_count)
    doc = w3c.parseString(html)
    window = Window('about:blank', doc, personality=log.ThugOpts.useragent)
    window.open()
    self.__run(window)
def list_bans_menu(ban_list, purpose):
    """Build the admin page listing active bans.

    ``purpose`` is accepted but not used yet (to be applied later, as in
    the boards listing).  Returns the serialized HTML document as bytes.
    """
    rows = []
    for ban in ban_list:
        when = time.strftime('%d/%m/%Y %H:%M', time.localtime(ban.date))
        rows.append(E.TR(
            E.TD(str(ban.id)),
            E.TD(ban.ip),
            E.TD(ban.initiator),
            E.TD(when),
            E.TD(str(ban.level)),
            E.TD(E.BUTTON('Снять', type='button',
                          onclick='remove_ban(this);')),
        ))
    head = E.HEAD(
        E.LINK(rel="stylesheet", href="/css/deeplight.css", type="text/css"),
        E.TITLE("Creating board"),
        E.SCRIPT(type='text/javascript', src='/adminscript.js'),
    )
    body = E.BODY(
        E.DIV(E.CLASS('adminupdiv'),
              E.DIV(E.CLASS('logout'), E.A('Logout', href='/admin/logout')),
              E.H2(E.CLASS("heading"), "Listing bans"),
              ),
        E.TABLE(
            E.CLASS("boardstable"),
            E.TR(E.TH('ID'), E.TH('IP'), E.TH('Забанивший'),
                 E.TH('Дата'), E.TH('Уровень'), E.TH('')),
            *rows),
    )
    return lxml.html.tostring(E.HTML(head, body))
def body(self):
    """The BODY of the html document"""
    report_list = E.OL(id='reports')
    source_code = self.code()
    for idx, (state_html, state_problem) in enumerate(self.states(), 1):
        # Each report gets a header (error + running count) and a body
        # holding a fresh copy of the source plus the state dump.
        header = E.E.header(
            E.DIV(E.CLASS('error'), state_problem),
            E.DIV(E.CLASS('report-count'), E.H3('Report'), str(idx)),
        )
        details = E.DIV(
            E.CLASS('body'),
            E.DIV(E.CLASS('source'), deepcopy(source_code)),
            state_html,
        )
        report_list.append(
            E.LI(E.ATTR(id="state{0}".format(idx)), header, details))
    return E.BODY(self.header(), report_list, self.footer())
def login_page_gen():
    """Build the admin login page.

    Returns the serialized HTML document as bytes.
    """
    html = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href="/css/deeplight.css",
                   type="text/css"),
            E.TITLE("Administration and moderation")
        ),
        E.BODY(
            E.H1(E.CLASS("heading"), "Farlight Engine Imageboard"),
            E.P(E.CLASS("loginmessage"), "You need to login"),
            E.FORM(E.CLASS("loginform"),
                   E.TABLE(
                       E.TR(E.TD('LOGIN'),
                            E.TD(E.INPUT(type='text', name='login',
                                         value=''))
                            ),
                       # Use a real password input so the value is masked;
                       # the original rendered it as type='text' (the
                       # password-change form already uses 'password').
                       E.TR(E.TD('PASSWORD'),
                            E.TD(E.INPUT(type='password', name='password',
                                         value=''))
                            ),
                   ),
                   E.INPUT(type='submit', value='LOGIN'),
                   method='POST',
                   action='/admin/login'
                   )
        )
    )
    return lxml.html.tostring(html)
def password_change_menu():
    """Build the admin password-change page.

    Returns the serialized HTML document as bytes.
    """
    html = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href="/css/deeplight.css",
                   type="text/css"),
            E.TITLE("Administration and moderation")
        ),
        E.BODY(
            E.H1(E.CLASS("heading"), "Farlight Engine Imageboard"),
            E.P(E.CLASS("loginmessage"), "Change your password"),
            E.FORM(E.CLASS("loginform"),
                   # Hidden fields route the POST to the password-change
                   # action handler.
                   E.INPUT(type='hidden', name='action', value='change'),
                   E.INPUT(type='hidden', name='instance', value='password'),
                   E.TABLE(
                       E.TR(E.TD('OLD PASSWORD'),
                            E.TD(E.INPUT(type='password', name='old_passwd',
                                         value=''))
                            ),
                       E.TR(E.TD('NEW PASSWORD'),
                            E.TD(E.INPUT(type='password', name='new_passwd',
                                         value=''))
                            ),
                       E.TR(E.TD('NEW PASSWORD AGAIN'),
                            E.TD(E.INPUT(type='password',
                                         name='new_passwd_again',
                                         value=''))
                            ),
                   ),
                   # The original submit button said 'LOGIN' — a copy-paste
                   # from the login form; this form changes the password.
                   E.INPUT(type='submit', value='CHANGE'),
                   method='POST',
                   action='/admin'
                   )
        )
    )
    return lxml.html.tostring(html)
def mailgen():
    """Compose the daily GDUT news digest and return it as HTML bytes."""
    results = DataCmp().cmp_result()
    news_topic = {'hot_news': '热点新闻',
                  'hot_inform': '最新公告',
                  'hot_msg': '最新简讯'}
    link_tpl = ('<a href="http://news.gdut.edu.cn/viewarticle.aspx?'
                'articleid={id}">{title}[{loc}]</a><br></br>')

    def sections():
        # Yield a heading per topic followed by its article links; empty
        # topics are removed from news_topic and rendered as '无'.
        for key, items in results.items():
            yield E.H2(news_topic[key])
            if any(items):
                for entry in items:
                    yield lxml.html.fromstring(link_tpl.format_map(entry))
            else:
                del news_topic[key]
                yield E.P('无')
        yield E.P('Sent at {}'.format(
            time.strftime("%y-%m-%d %H:%M", time.localtime())))

    parts = list(sections())
    # If every topic turned out empty, replace the whole body.
    if not any(news_topic):
        parts = [E.H2('今日无事可做')]
    page = E.HTML(
        E.HEAD(
            E.META(charset='UTF-8'),
            E.META(name='viewport',
                   content='width=device-width, initial-scale=1.0'),
            E.TITLE("GDUTNews Stream")),
        E.BODY(*parts))
    return lxml.html.tostring(page)
def package_as_standalone(self):
    """
    Render the story outline as a html document with only the reveal.js
    presentation as content

    Returns
    ----------
    A `str` containing a complete html document with the presentation
    """
    presentation = self.render()

    # Body: the rendered presentation (scripts filtered) plus every
    # plugin-provided script fragment.
    body = E.BODY()
    body.append(presentation)
    body = self.strip_unauthorized_scripts(body)
    for script_html in self.plugin_mgr.get_scripts():
        for fragment in lxml.html.fragments_fromstring(script_html):
            body.append(fragment)

    # Head: meta tags derived from the body, then plugin styles.
    head = E.HEAD()
    for meta in self.get_meta_tags(body):
        head.append(lxml.html.fragment_fromstring(meta))
    for style_html in self.plugin_mgr.get_styles():
        for fragment in lxml.html.fragments_fromstring(style_html):
            head.append(fragment)
    head = self.strip_unauthorized_scripts(head)

    return lxml.html.tostring(E.HTML(head, body), pretty_print=True)
def render_html(results: Dict[str, Any]) -> str:
    """Render a Locust results dict as an HTML summary string."""
    elements = [
        E.H2(E.A("Locust", href="https://github.com/simiotics/locust"),
             " summary")
    ]
    refs = results.get("refs")
    if refs is not None:
        elements.append(E.H3("Git references"))
        elements += [E.B("Initial: "), E.SPAN(refs["initial"]), E.BR()]
        if refs["terminal"] is not None:
            elements += [E.B("Terminal: "), E.SPAN(refs["terminal"]), E.BR()]
        elements.append(E.HR())
    for item in results["locust"]:
        elements.append(file_section_handler(item))
    document = E.HTML(E.BODY(*elements))
    return lxml.html.tostring(document).decode()
def template(name, contents):
    """Wrap ``contents`` in the standard RUM profile page chrome.

    ``name`` selects which navbar tab ('cpu', 'wc' or 'help') is marked
    active.
    """
    def active_if(tab):
        # Bootstrap 'active' class for the currently selected tab.
        return 'active' if name == tab else ''

    head = E.HEAD(
        E.LINK(rel='stylesheet', type='text/css',
               href='bootstrap/css/bootstrap.css'),
        E.LINK(rel='stylesheet', type='text/css', href='profile.css'),
        E.SCRIPT(src='bootstrap/js/bootstrap.min.js'),
        E.TITLE('RUM Job Profile'))

    nav_links = E.UL(
        E.LI(E.A('CPU time', href='cpu.html'), CLASS=active_if('cpu')),
        E.LI(E.A('Wallclock time', href='wc.html'), CLASS=active_if('wc')),
        E.LI(E.A('Help', href='help.html'), CLASS=active_if('help')),
        CLASS='nav')

    navbar = E.DIV(
        E.DIV(
            E.DIV(
                E.A(E.SPAN(CLASS='icon-bar'),
                    E.SPAN(CLASS='icon-bar'),
                    E.SPAN(CLASS='icon-bar'),
                    CLASS='btn btn-navbar'),
                E.A('RUM Profile', CLASS='brand', href='#'),
                E.DIV(nav_links, CLASS='nav-collapse collapse'),
                CLASS='container'),
            CLASS='navbar-inner'),
        CLASS='navbar navbar-inverse navbar-fixed-top')

    return E.HTML(
        head,
        E.BODY(navbar,
               E.BR(), E.BR(), E.BR(),
               E.DIV(contents, CLASS='container')))
def process_file(input_file, output_file=None, encoding='utf-8'):
    """Convert a recorded XML request export into a self-submitting HTML form.

    :param input_file: path to the XML file holding the recorded request
    :param output_file: path for the generated HTML (defaults to
        ``input_file + '.html'``)
    :param encoding: charset declared in and used for the generated page
    :returns: the path of the file written
    :raises ValueError: if the recorded request is not a POST
    """
    if output_file is None:
        output_file = input_file + '.html'
    root = etree.parse(input_file).getroot()
    item = root.xpath("/items/item")[0]
    (method,) = item.xpath("method/text()")
    if method.lower() != "post":
        raise ValueError("Only POST requests are supported")  # TODO
    (url,) = item.xpath("url/text()")
    (request,) = item.xpath("request")
    contents = request.text
    if request.get("base64"):
        contents = b64decode(contents)
    # Drop the HTTP headers; the body follows the first blank line.
    _, body = contents.split("\r\n\r\n", 1)
    output = E.HTML(
        E.HEAD(E.META(**{'http-equiv': 'Content-type',
                         'content': 'text/html; charset=' + encoding})),
        E.BODY(
            E.FORM(
                E.INPUT(type="submit"),
                *(E.INPUT(type="hidden", name=name, value=value)
                  for name, value in
                  decode_form_urlencoded_values(body, encoding)),
                action=url,
                method=method
            )
        )
    )
    # Context manager closes the output even if serialization raises
    # (the original never closed it).  NOTE: `unicode` — Python 2 code.
    with codecs.open(output_file, 'wb', encoding) as html_output:
        html_output.write(html.tostring(output, encoding=unicode))
    return output_file
def _create_html_root(self, hhcpath, log, encoding):
    """Convert a CHM .hhc table-of-contents file into an HTML index.

    Parses the .hhc, builds a TOC tree and, when the TOC has more than
    one entry, writes a nested <div>/<a> outline next to the .hhc file;
    otherwise the raw .hhc markup is written out unchanged.

    :param hhcpath: path to the .hhc file inside the extracted CHM
    :param log: logger used for debug output
    :param encoding: encoding used to decode the .hhc bytes
    :return: tuple of (path to the generated HTML file, TOC object)
    """
    hhcdata = self._read_file(hhcpath)
    hhcdata = hhcdata.decode(encoding)
    # Normalize the markup (strip encoding declarations, resolve
    # entities) before handing it to lxml.
    hhcdata = xml_to_unicode(hhcdata, verbose=True,
                             strip_encoding_pats=True,
                             resolve_entities=True)[0]
    hhcroot = html.fromstring(hhcdata)
    toc = self._process_nodes(hhcroot)
    log.debug('Found %d section nodes' % toc.count())
    htmlpath = os.path.splitext(hhcpath)[0] + ".html"
    base = os.path.dirname(os.path.abspath(htmlpath))

    def unquote(x):
        # Percent-decode a path component (round-trips through UTF-8).
        if isinstance(x, str):
            x = x.encode('utf-8')
        return _unquote(x).decode('utf-8')

    def unquote_path(x):
        # Prefer the unquoted form only when that file actually exists.
        y = unquote(x)
        if (not os.path.exists(os.path.join(base, x)) and
                os.path.exists(os.path.join(base, y))):
            x = y
        return x

    def donode(item, parent, base, subpath):
        # Recursively render a TOC node and its children as nested DIVs
        # of links; entries without a title are skipped.
        for child in item:
            title = child.title
            if not title:
                continue
            raw = unquote_path(child.href or '')
            rsrcname = os.path.basename(raw)
            rsrcpath = os.path.join(subpath, rsrcname)
            if (not os.path.exists(os.path.join(base, rsrcpath)) and
                    os.path.exists(os.path.join(base, raw))):
                rsrcpath = raw
            # If the path contains no percent escapes yet, quote it for
            # use as an href.
            if '%' not in rsrcpath:
                rsrcpath = urlquote(rsrcpath)
            if not raw:
                rsrcpath = ''
            c = builder.DIV(builder.A(title, href=rsrcpath))
            donode(child, c, base, subpath)
            parent.append(c)

    with open(htmlpath, 'wb') as f:
        if toc.count() > 1:
            # Resolve resource paths relative to the first TOC entry.
            path0 = toc[0].href
            path0 = unquote_path(path0)
            subpath = os.path.dirname(path0)
            base = os.path.dirname(f.name)
            root = builder.DIV()
            donode(toc, root, base, subpath)
            raw = html.tostring(builder.HTML(builder.BODY(root)),
                                encoding='utf-8', pretty_print=True)
            f.write(raw)
        else:
            # Degenerate TOC: keep the original .hhc markup as-is.
            f.write(as_bytes(hhcdata))
    return htmlpath, toc
def parse_wiki(tree, title):
    """Summarize a parsed Wikipedia page and write it out as HTML.

    Walks the ``mw-parser-output`` div, accumulating paragraph text per
    section and emitting a two-sentence summary for each section that
    has any text.

    :param tree: lxml ElementTree of the wiki page
    :param title: page title used for the document heading
    """
    root = tree.getroot()
    parser_div = root.xpath("//div[@class='mw-parser-output']")[0]
    headers = ["h1", "h2", "h3", "h4", "h5", "h6"]
    text = ""
    header = ""
    html = ""
    for child in parser_div.getchildren():
        if child.tag == "p":
            text += child.text_content().lstrip().rstrip()
        elif child.tag in headers:
            # A new section starts: flush the accumulated text of the
            # previous one (if any) as a summary.
            if len(text) > 0:
                summary = summarize(text, limit=2)
                html += "<h2>" + header + "</h2><p>" + summary + "</p>"
                text = ""
            header = child.text_content().split("[")[0]
            print(header)
    # Flush the final section: the original dropped any text that came
    # after the last heading.
    if len(text) > 0:
        summary = summarize(text, limit=2)
        html += "<h2>" + header + "</h2><p>" + summary + "</p>"
    # TODO - add style sheet
    # TODO - format text
    html_out = E.HTML(
        E.HEAD(E.TITLE(title)),
        E.BODY(
            E.H1(E.CLASS("heading"), title),
            lxml.html.fromstring(html)
        )
    )
    # NOTE(review): output filename is hard-coded despite the title
    # parameter — confirm whether it should be derived from ``title``.
    html_out.getroottree().write(file="summarized-roanoke.html",
                                 method="html")
def _createHTMLDocument(self, title=None):
    """Build an empty DOM document, optionally carrying a <title>."""
    title_node = E.TITLE(title) if title else ""
    document = E.HTML(E.HEAD(title_node), E.BODY())
    markup = tostring(document, doctype='<!doctype html>')
    soup = bs4.BeautifulSoup(markup, "lxml")
    return DOMImplementation(soup)
def __init__(self, text, plaintext=False):
    """Parse ``text`` as markdown, or wrap it verbatim when ``plaintext``."""
    self.text = text
    if plaintext:
        # Verbatim text goes straight into <html><body><code>.
        tree = builder.HTML(builder.BODY(builder.CODE(self.text)))
    else:
        tree = etree.HTML(markdown.markdown(self.text))
    self.ns = 'xhtml'
    self.nsmap = {'xhtml': 'http://www.w3.org/1999/xhtml'}
    # Normalize the tree onto the default XHTML namespace.
    self.html = self.set_default_ns(tree, default_ns=self.ns)
def __init__(self, datas, lang, freeze, title, css_path):
    """Initialize the writer with an empty skeleton HTML document."""
    super().__init__(datas, lang, freeze)
    head = builder.HEAD(
        builder.META(charset="utf-8"),
        builder.TITLE(title),
        builder.LINK(rel="stylesheet", href=css_path),
    )
    # Content is rendered later into the #text_area div.
    body = builder.BODY(builder.DIV(id="text_area"))
    self._html = builder.HTML(head, body)
def make_summary_doc(tests_w_results):
    """Build the summary HTML document for a batch of readability tests."""
    header_row = B.TR(
        B.TH('Test Name'),
        B.TH('Inserted (in # of blocks)'),
        B.TH('Deleted (in # of blocks)'),
        B.TH('Links'),
        B.TH('Notes'))
    table_body = B.TBODY(header_row)
    for test, result in tests_w_results:
        table_body.append(make_summary_row(test, result))
    return B.HTML(
        B.HEAD(B.TITLE('Readability Test Summary'),
               B.STYLE(SUMMARY_CSS, type='text/css')),
        B.BODY(B.TABLE(table_body)))
def gen_papers(p_list):
    """Render the papers index page to ``<base_path>/papers.html``.

    :param p_list: iterable of paper objects understood by
        ``make_short_papers``.
    """
    index = builder.HTML(
        builder.HEAD(*make_head()),
        builder.BODY(
            make_menu(),
            builder.DIV(builder.H1("Papers", builder.CLASS("section-title")),
                        *make_short_papers(p_list),
                        builder.CLASS("section"))))
    # Context manager closes the handle; the original `file=open(...)`
    # leaked it.
    with open(os.path.join(base_path, "papers.html"), "w") as out:
        print(html.etree.tostring(index, pretty_print=True,
                                  method='html').decode("utf-8"), file=out)
def visit_Component(self, node):
    """Render a register-map Component node as a complete HTML page.

    Builds the page title (instance-specific when ``self.inst`` is set),
    sweeps the node's children for the main content, then synthesizes a
    table-of-contents sidebar from the generated h2/h3 headings.
    """
    inst = getattr(self, 'inst', None)
    if inst:
        title = 'Instance {} of {} Register Map'.format(inst, node.name)
    else:
        title = 'Base {} Register Map'.format(node.name)
    self.title = title

    # Create the main content by sweeping the tree.
    bc = E.DIV(id='breadcrumbs')
    try:
        if self.breadcrumbs is not None:
            bc.append(self.breadcrumbs)
    except AttributeError:
        pass
    ww = node.width // 8  # word width in bytes
    # Number of hex nibbles needed to print the highest address.
    an = ((node.size - 1).bit_length() + 3) // 4
    with self.tempvars(wordwidth=ww, address_nibbles=an, hlev=2):
        nodes = ([E.H1(title, id='title'), bc] +
                 [E.P(d) for d in node.description] +
                 [c for c in self.visitchildren(node)] +
                 [self.footer(node)])
        contentnode = E.DIV(*nodes, id='content')

    # Add a table of contents sidebar.  We'll assume that everything that
    # wants to be in the TOC is already a heading and just work from there.
    h2list = E.UL()
    for elem in contentnode.iter('h2', 'h3'):
        # Reuse the heading text as the anchor id.
        id = escape(elem.text)
        elem.attrib['id'] = id
        if elem.tag == 'h2':
            h2node = E.LI(E.A(elem.text, href='#' + id))
            h2list.append(h2node)
            h3list = None
        else:
            # NOTE(review): an <h3> appearing before the first <h2> would
            # hit ``h3list`` unbound — appears not to occur in practice.
            if h3list is None:
                h3list = E.UL()
                h2list.append(h3list)
            h3list.append(E.LI(E.A(elem.text, href='#' + id)))

    # Put it all together.
    return E.HTML(
        E.HEAD(
            E.TITLE(title),
            E.LINK(rel='stylesheet', type='text/css',
                   href=htmlpathjoin(self.styledir, 'reg.css'))),
        E.BODY(
            E.DIV(E.DIV(E.P(E.A(title, href='#title')),
                        h2list, id='sidebar'),
                  contentnode, id='wrapper')),
    )
def run_local(self, url):
    """Analyze a local file with Thug.

    Wraps bare JavaScript (.js/.jse) in a minimal HTML scaffold so it
    can be parsed as a document, then runs the analysis window over it.
    """
    log.ThugLogging.set_url(url)
    log.ThugOpts.local = True
    log.HTTPSession = HTTPSession()
    # Context manager closes the handle (the original leaked it).
    with open(url, 'r') as f:
        content = f.read()
    extension = os.path.splitext(url)
    if len(extension) > 1 and extension[1].lower() in ('.js', '.jse', ):
        if not content.lstrip().startswith('<script'):
            # Bare script source: wrap it in <html><body><script>.
            html = tostring(E.HTML(E.HEAD(), E.BODY(E.SCRIPT(content))))
        else:
            # Already wrapped in <script> (possibly with stray html/head/
            # body tags): strip the wrappers and re-embed the script text.
            soup = BeautifulSoup(content, "html.parser")
            for tagname in ('html', 'head', 'body'):
                try:
                    getattr(soup, tagname).unwrap()
                except AttributeError:
                    pass
            html = tostring(
                E.HTML(E.HEAD(),
                       E.BODY(E.SCRIPT(soup.script.get_text()))))
    else:
        html = content
    doc = w3c.parseString(html)
    window = Window('about:blank', doc, personality=log.ThugOpts.useragent)
    window.open()
    self.__run(window)
def gen_html(encoding=ENCODING):
    """Create the HTML structure

    :return: Return the body structure from lxml.html
    """
    page_body = E.BODY()
    # Attach the body to a full document so callers can reach the root
    # through the returned element's parent links.
    _document = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href=CSSFILE, type="text/css"),
            E.META(charset=encoding),
        ),
        page_body)
    return page_body
def generateHTML(city_info):
    """Render a simple weather page for ``city_info`` and return it as str."""
    heading = "Weather for:{}".format(city_info['city'])
    page = E.HTML(
        E.HEAD(E.TITLE(heading)),
        E.BODY(
            E.H1(heading),
            E.P("{}".format(city_info['city_temp'])),
            E.P("{}".format(city_info['city_forecast'])),
            E.P("{}".format(city_info['city_min'])),
            E.P("{}".format(city_info['city_max'])),
            E.P("{}".format(city_info['city_time']))))
    return lxml.html.tostring(page).decode('utf-8')
def _generate_project_report_in_html(self, project_name, project_bugs):
    """Build an HTML <body> fragment summarizing one project's bugs."""
    report = E.BODY(
        E.H2(E.CLASS("heading"),
             "%s (%d)" % (project_name, len(project_bugs))))
    for bug in project_bugs:
        link = E.A(bug.title, href=bug.web_link, target='_blank')
        report.append(
            E.P("[%s:%s] " % (bug.importance, bug.status), link))
        if bug.assignee:
            report.append(
                E.P("Assigned to: %s" % (bug.assignee.display_name)))
    return report
def getCity(city):
    """Fetch weather for ``city`` and render it as an HTML string."""
    handler = weatherHandler(city)
    handler.get_weather()
    heading = "Weather for:{}".format(handler.city)
    page = E.HTML(
        E.HEAD(E.TITLE(heading)),
        E.BODY(
            E.H1(heading),
            E.P("{}".format(handler.formatted_temp)),
            E.P("{}".format(handler.formatted_weather)),
            E.P("{}".format(handler.formatted_minMax)),
            E.P("{}".format(handler.formatted_time))))
    return lxml.html.tostring(page).decode('utf-8')
def board_creation_menu():
    """Build the admin page with the new-board creation form.

    Returns the serialized HTML document as bytes.
    """
    #here is the html board creation menu
    html = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href="/css/deeplight.css", type="text/css"),
            E.TITLE("Creating board")
        ),
        E.BODY(
            E.DIV(E.CLASS('adminupdiv'),
                  E.DIV(E.CLASS('logout'), E.A('Logout', href='/admin/logout')),
                  E.H2(E.CLASS("heading"), "Create new board"),
                  ),
            E.DIV(E.CLASS("boardcreateform"),
                  E.FORM(
                      # Hidden fields route the POST to the board-create
                      # action handler.
                      E.INPUT(type = 'hidden', name = 'action', value = 'create'),
                      E.INPUT(type = 'hidden', name = 'instance', value = 'board'),
                      E.TABLE(
                          E.TR(E.TD('Address'),
                               E.TD(E.INPUT(type = 'text', name = 'address', value = ''))
                               ),
                          E.TR(E.TD('Tablename'),
                               E.TD(E.INPUT(type = 'text', name = 'tablename', value = ''))
                               ),
                          E.TR(E.TD('Name'),
                               E.TD(E.INPUT(type = 'text', name = 'name', value = ''))
                               ),
                          E.TR(E.TD('Fullname'),
                               E.TD(E.INPUT(type = 'text', name = 'fullname', value = ''))
                               ),
                          E.TR(E.TD('Description'),
                               E.TD(E.INPUT(type = 'text', name = 'description', value = ''))
                               ),
                          E.TR(E.TD('Pics number'),
                               E.TD(E.INPUT(type = 'number', name = 'picsnum', value = '', min = '0', max = '10'))
                               ),
                          E.TR(E.TD('Bumplimit'),
                               E.TD(E.INPUT(type = 'number', name = 'bumplimit', value = '', min = '0'))
                               ),
                          E.TR(E.TD('Max threads'),
                               E.TD(E.INPUT(type = 'number', name = 'maxthreads', value = '', min = '-1'))
                               ),
                          # Checkboxes: allow deleting posts / threads
                          # (labels are user-facing Russian strings).
                          E.TR(E.TD(E.INPUT(type='checkbox', name='delposts', value='1', checked='checked'),
                                    'Удаление постов', colspan='2', style='text-align:center;')),
                          E.TR(E.TD(E.INPUT(type='checkbox', name='delopposts', value='1', checked='checked'),
                                    'Удаление тредов', colspan='2', style='text-align:center;')),
                      ),
                      E.INPUT(type = 'submit', value = 'Create'),
                      method='POST',
                      action='/admin/'
                  )
                  )
        )
    )
    return lxml.html.tostring(html)
def html_page_return(board, thread, default_style):
    """Build the complete HTML page for one thread on ``board``.

    :param board: board address (e.g. 'b')
    :param thread: numeric thread id
    :param default_style: stylesheet name set via the Default-Style meta tag
    :return: serialized HTML document as bytes
    """
    html = E.HTML(
        E.HEAD(
            E.META(**{'http-equiv':"Default-Style", 'content':default_style, 'id':'stylemetatag'}),
            E.TITLE("/"+board+"/ - №"+str(thread)), #title
            E.SCRIPT(type = 'text/javascript', src = '/mainscript.js'), #js
            *initiate.style_cache
        ),
        E.BODY(
            # Metadata paragraphs read by the client-side scripts.
            E.P(E.CLASS("board"), board, id = 'board'),
            E.P(E.CLASS("thread"), str(thread), id = 'thread'),
            E.TABLE(
                E.CLASS("maintable"),
                E.THEAD(E.TR(E.TD(
                    E.TABLE(E.TR(E.TD(E.CLASS('left'), copy.copy(initiate.board_cache_navigation)),
                                 E.TD(E.CLASS('right'), utilfunctions.generate_right_up_corner_menu()),
                                 ), id='headblock'),
                    E.HR(E.CLASS("delimeter")),
                )), id = 'header'),
                E.TBODY(E.TR(E.TD(
                    E.H2(E.CLASS("boardname"),
                         E.A('/' + board + '/ - '+ initiate.board_cache[board].name, href = '/' + board),
                         ),
                    E.HR(E.CLASS("delimeter")),
                    initiate.board_cache[board].post_form, #need to make it depending on post_form_type
                    # Inline helpers toggling the reply form's visibility.
                    E.SCRIPT('function open_form() {document.getElementById("postform").style.display = "block"; document.getElementById("closeform").style.display = "block"; document.getElementById("threadcreate").style.display = "none";}'),
                    E.SCRIPT('function close_form() {document.getElementById("postform").style.display = "none"; document.getElementById("closeform").style.display = "none"; document.getElementById("threadcreate").style.display = "block";}'),
                    E.H3(E.A('Ответить в тред', href = "javascript:open_form();"), id = 'threadcreate'),
                    E.H4(E.A('Скрыть форму', href = "javascript:close_form();"), id = 'closeform'),
                    E.HR(E.CLASS("delimeter")),
                    EM('main', '', id = 'mainframe'),
                    E.DIV('', id = 'optionsdiv'),
                )), id = 'mainpart'),
                E.TFOOT(E.TR(E.TD(
                    E.DIV(
                        E.HR(E.CLASS("delimeter"), id = 'end')
                    ),
                    initiate.board_cache_navigation,
                    E.DIV('powered by ',
                          E.A('Farlight Imageboard Engine',
                              href='https://github.com/Alpherie/farlight_board_engine',
                              target='_blank',
                              ), id='credentials'),
                )), id = 'footer'),#we make it a footer
            ),
            onload = 'threadfunc()'
        )
    )
    return lxml.html.tostring(html)
def list_boards_menu(board_list, purpose):
    """need to put boards table creating to a separate function in future"""
    # Template cell with controls for querying recent post counts over a
    # chosen time window; a copy is placed into every board row.
    posts_num_cell = E.DIV(E.SPAN('????', style = 'display:inline-block; width:4em; text-align:center;'),
                           E.INPUT(type='number', size='6', min='0', value='1', style = 'width: 6em;'),
                           E.SELECT(E.OPTION('Секунды', value='1'),
                                    E.OPTION('Минуты', value='60'),
                                    E.OPTION('Часы', value='3600'),
                                    E.OPTION('Дни', value='86400', selected='')
                                    ),
                           E.BUTTON('GET', onclick='get_posts_num_from_time(this)', type = 'button'))
    tablerows = [E.TR(E.TD(E.A(b.address, href = '/'+b.address)),
                      E.TD(b.tablename),
                      E.TD(str(b.name)),
                      E.TD(str(b.fullname)),
                      E.TD(str(b.description)),
                      E.TD(str(b.category)),
                      E.TD(str(b.pictures)),
                      E.TD(str(b.bumplimit)),
                      E.TD(str(b.maxthreads)),
                      E.TD(copy.copy(posts_num_cell))
                      )for b in board_list]
    #purpose will be applyed later
    html = E.HTML(
        E.HEAD(
            E.LINK(rel="stylesheet", href="/css/deeplight.css", type="text/css"),
            E.TITLE("Creating board"),
            E.SCRIPT(type = 'text/javascript', src = '/adminscript.js') #js
        ),
        E.BODY(
            E.DIV(E.CLASS('adminupdiv'),
                  E.DIV(E.CLASS('logout'), E.A('Logout', href='/admin/logout')),
                  E.H2(E.CLASS("heading"), "Listing boards"),
                  ),
            E.TABLE(
                E.CLASS("boardstable"),
                E.TR(E.TH('Адрес'),
                     E.TH('Таблица'),
                     E.TH('Название'),
                     E.TH('Полное название'),
                     E.TH('Описание'),
                     E.TH('Категория'),
                     E.TH('Максимум картинок'),
                     E.TH('Бамплимит'),
                     E.TH('Максимум тредов'),
                     E.TH('Постов за последнее время')
                     ),
                *tablerows
            )
        )
    )
    return lxml.html.tostring(html)
def gen_index(p_list):
    """Render the landing page (about + contact) to ``index.html``.

    :param p_list: unused here; kept for signature parity with the other
        ``gen_*`` helpers.
    """
    # Read the static fragments with context managers — the original
    # leaked all three file handles (two reads and the write).
    with open("src/html/about.html") as f:
        about = html.fromstring(f.read())
    with open("src/html/contact.html") as f:
        contact = html.fromstring(f.read())
    index = builder.HTML(
        builder.HEAD(
            *make_head(),
            builder.LINK(rel="stylesheet", href=base_url + "css/about.css")),
        builder.BODY(make_menu(index=True), about, contact))
    with open(os.path.join(base_path, "index.html"), "w") as out:
        print(html.etree.tostring(index, pretty_print=True,
                                  method='html').decode("utf-8"), file=out)
def main(): parser = argparse.ArgumentParser(description='Get an HTML table from an OPML file with web resources ordered by Google PageRank.') parser.add_argument('--input', nargs=1, required=True, help='name of the OPML input file') parser.add_argument('--output', nargs=1, required=True, help='name of the HTML output file') args = parser.parse_args() input_file = args.input[0] output_file = args.output[0] tree = etree.parse(input_file) elems = tree.xpath("//opml/body/outline/outline") counter = 0 buggy = [] urls = [] for elem in elems: url = elem.attrib["htmlUrl"] title = elem.attrib["title"] pr = False retries = 3 while retries != 0: try: pr = int(pagerank.GetPageRank(url)) retries = 0 except Exception as e: retries -= 1 time.sleep(3) if pr != False: print url, title, pr urls.append({'title':title, "url":url, "pagerank":pr}) counter += 1 else: buggy.append({'url':url, 'title':title}) f = open(output_file, "w") headers = ["Title", "URL", "PageRank"] sorted_urls = sorted(urls, key=lambda k:k["pagerank"]) sorted_urls_as_a_list = [] sorted_urls.reverse() style = "border: 1px solid black" for elem in sorted_urls: print elem["title"], elem["url"], elem["pagerank"] sorted_urls_as_a_list.append(E.TR(E.TD(elem["title"], style=style), E.TD(E.A(elem["url"],href=elem["url"]), style=style), E.TD(str(elem["pagerank"]), style=style))) html = E.HTML(E.HEAD(), E.BODY(E.TABLE(*tuple(sorted_urls_as_a_list), style="border: 1px solid black; border-collapse: collapse"))) htmlcode = lxml.html.tostring(html) f.write(htmlcode) f.close()