def test_render_single_page(self):
    """All posts fit on one page: expect only index.html and 0.html."""
    items = [T.article('item1'), T.article('item2')]
    pager = dominatepp.PagesRenderer(items)
    pager.render(posts_per_page=5)
    rendered = pager.pages
    # index + the single content page
    self.assertEqual(len(rendered), 2)
    for name in ('index.html', '0.html'):
        self.assertDictContains(rendered, name)
    html = rendered['0.html'].render()
    for fragment in ("<article>item1</article>", "<article>item2</article>"):
        assert_contains_html(self, html, fragment)
def dump_html_standalone(snippets, fname, webpage_style,
                         include_banner, include_vernums,
                         html_assets, html_classes):
    """Assemble *snippets* into a standalone HTML document and return it as a string.

    Side effect: appends the Alectryon CSS/JS asset paths to *html_assets*
    so the caller can copy them next to the generated page.
    """
    from dominate import tags, document
    from dominate.util import raw
    from . import GENERATOR
    from .core import SerAPI
    from .html import gen_banner, wrap_classes, ASSETS
    from .pygments import HTML_FORMATTER

    doc = document(title=fname)
    head = doc.head
    head.add(tags.meta(charset="utf-8"))
    head.add(tags.meta(name="generator", content=GENERATOR))
    doc.set_attribute("class", "alectryon-standalone")

    for css in ASSETS.ALECTRYON_CSS:
        head.add(tags.link(rel="stylesheet", href=css))
    head.add(raw(ASSETS.IBM_PLEX_CDN))
    head.add(raw(ASSETS.FIRA_CODE_CDN))
    for js in ASSETS.ALECTRYON_JS:
        head.add(tags.script(src=js))

    html_assets.extend(ASSETS.ALECTRYON_CSS)
    html_assets.extend(ASSETS.ALECTRYON_JS)

    # Inline the Pygments theme so the page needs no extra stylesheet for it.
    head.add(tags.style(HTML_FORMATTER.get_style_defs('.highlight'),
                        type="text/css"))

    root = doc.body.add(tags.article(cls=wrap_classes(webpage_style,
                                                      *html_classes)))
    if include_banner:
        root.add(raw(gen_banner(SerAPI.version_info(), include_vernums)))
    for snippet in snippets:
        root.add(snippet)
    return doc.render(pretty=False)
def test_save_multipage(self):
    """Saving writes the index, every numbered page, and the stylesheet."""
    posts = [T.article(f'item{i}') for i in range(1, 5)]
    pager = dominatepp.PagesRenderer(posts, page_css=b'h { color: red; }')
    pager.render(posts_per_page=2)
    pager.save_to(self.test_dir)
    for filename in ('index.html', '0.html', '1.html', 'page.css'):
        self.assertPathExists(self.test_dir.joinpath(filename))
def test_render_multipage(self):
    """Four posts at two per page yield the index plus two numbered pages."""
    posts = [T.article(f'item{i}') for i in range(1, 5)]
    pager = dominatepp.PagesRenderer(posts)
    pager.render(posts_per_page=2)
    rendered = pager.pages
    self.assertEqual(len(rendered), 3)  # index + 2 content pages
    for name in ('index.html', '0.html', '1.html'):
        self.assertDictContains(rendered, name)
    expectations = {
        '0.html': ("<article>item1</article>", "<article>item2</article>"),
        '1.html': ("<article>item3</article>", "<article>item4</article>"),
    }
    for name, fragments in expectations.items():
        html = rendered[name].render()
        for fragment in fragments:
            assert_contains_html(self, html, fragment)
def report(job, report):
    """Generate a report.html with analysis and timelines from given jobs.

    Args:
        job: iterable of Jenkins job names to analyze (the parameter carries
            a collection despite the singular name, kept for compatibility).
        report: path of the HTML file to write.
    """
    jobs = job
    today = datetime.datetime.now().strftime("%Y-%m-%d")
    doc = dominate.document(title='Build Stats - {}'.format(today))
    with doc.head:
        dt.link(
            rel='stylesheet',
            href='https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/3.0.1/github-markdown.min.css'
        )
        dt.meta(name='viewport', content='width=device-width, initial-scale=1')

    # Create <article>...</article> according to sindresorhus/github-markdown-css
    article = dt.article(cls='markdown-body')
    doc += article
    with article:
        dt.h1('Marathon Loop Build Stats ({})'.format(today))

    # One event loop reused for every job (was fetched redundantly per job).
    loop = asyncio.get_event_loop()
    fail_statuses = ['FAILED', 'REGRESSION']

    # Generate report for each job.
    for job_name in jobs:  # renamed: previously shadowed the `job` parameter
        j = JenkinsJob.load(job_name)
        tests = loop.run_until_complete(j.unique_fails()).to_html()
        errors = loop.run_until_complete(j.unique_errors()).to_html()

        df = loop.run_until_complete(j.test_dataframe())
        # One row per build: did any test fail, and when did it run?
        ts = df.groupby(level=0).agg({
            'status': lambda x: x.isin(fail_statuses).any(),
            'timestamp': 'max'
        })

        f = io.BytesIO()
        fig = plt.figure(figsize=(20, 3))
        c = ts.status.map({True: 'xkcd:light red', False: 'xkcd:light blue'})
        plt.bar(x=ts.timestamp, height=1, width=0.01, color=c, align='edge')
        plt.xlim([ts.timestamp.min(), ts.timestamp.max()])
        plt.savefig(f, format='svg')
        plt.close(fig)  # avoid leaking one open figure per job

        with article:
            dt.h2('Marathon {}'.format(job_name))
            dt.div(dominate.util.raw(errors))
            dt.div(dominate.util.raw(tests))
            dt.div(dominate.util.raw(f.getvalue().decode('utf-8')))

    with open(report, "w") as report_file:
        print(doc, file=report_file)
def generateHtml():
    """Render the changelog JSON into ../changelog/index.html and emit the feed."""
    with open(path.join(current_dir, '../changelog/', 'storage.json'), 'r') as f:
        data = json.load(f)[::-1]  # reverse so the newest entry comes first
    doc = document(title='Changelog - lkellar.org')
    articles = []
    with doc.head:
        tags.link(rel='stylesheet', href='style.css')
        tags.meta(charset="UTF-8")
        tags.meta(name="description",
                  content="A log of all changes made on lkellar.org")
        tags.meta(name="viewport", content="width=device-width, initial-scale=1")
        tags.link(rel="alternate", title="Changelog Feed",
                  type="application/json",
                  href="https://lkellar.org/changelog/feed.json")
    with doc:
        with tags.nav().add(tags.ol()):
            with tags.li():
                tags.a("Home", href="../")
            tags.li("Changelog")
        with tags.main():
            tags.h1('Changelog')
            for entry in data:
                tags.hr()
                # Anchor id: "Title - date" with spaces removed, lowercased.
                # Bug fix: `.lower()` was previously applied to the empty
                # replacement string (a no-op), so ids were never lowercased.
                anchor = f'{entry["title"]} - {entry["date"]}'.replace(' ', '').lower()
                article_content = tags.article()
                with article_content:
                    tags.h2(
                        f'{entry["title"]} - {entry["date"].split("T")[0]}',
                        id=anchor)
                    list_content = tags.ul()
                    with list_content:
                        for line in entry['items']:
                            # Rewrite the `urls` pattern's two capture groups
                            # into an <a> tag before embedding as raw HTML.
                            line = urls.sub(r'<a href="\2">\1</a>', line)
                            tags.li(raw(line))
                articles.append(
                    (anchor, list_content.render(), entry["date"], entry['title']))
    with open(path.join(current_dir, '../changelog/', 'index.html'), 'w') as f:
        f.write(doc.render())
    generateFeed(articles)
def make_image_table(doc, img_root, img_folders, shuffle=False, max_imgs=20):
    """Append a table of images to *doc*, one column per folder.

    Args:
        doc: dominate document to append the table to.
        img_root: root directory containing the image folders.
        img_folders: folder names (relative to img_root) to show side by side.
        shuffle: when True, randomize image order within each folder.
        max_imgs: cap on the number of images taken from each folder.
    """
    # Collect up to max_imgs image paths (relative to img_root) per folder.
    all_images = []
    for img_folder in img_folders:
        img_names = [
            os.path.join(img_folder, name)
            for name in sorted(os.listdir(os.path.join(img_root, img_folder)))
        ]
        if shuffle:
            random.shuffle(img_names)
        all_images.append(img_names[:max_imgs])

    # Arrange as list of rows [{0: img_1_folder_0, 1: img_1_folder_1, ..}, ]
    # default=0 keeps an empty img_folders list from raising ValueError.
    max_len = max((len(images) for images in all_images), default=0)
    all_arranged_imgs = []
    for idx in range(max_len):
        # Row dictionary of folder_idx -> img_path; folders that have run
        # out of images are simply absent from the row.
        idx_dic = {
            folder_idx: img_names[idx]
            for folder_idx, img_names in enumerate(all_images)
            if idx < len(img_names)
        }
        all_arranged_imgs.append(idx_dic)

    num_folders = len(img_folders)
    with doc:
        with dtags.article(cls="markdown-body"):
            with dtags.table().add(dtags.tbody()):
                for arranged_imgs in all_arranged_imgs:
                    with dtags.tr():
                        for folder_idx in range(num_folders):
                            if folder_idx in arranged_imgs:
                                dtags.td().add(
                                    dtags.img(src=arranged_imgs[folder_idx]))
                            else:
                                dtags.td()  # empty cell keeps columns aligned
) split_df = pd.read_csv( smthg_root / f"something-something-v2-{split_suffix}.csv", delimiter=";", names=["video_id", "action_label"], ) df = split_df[split_df.action_label.isin(action_labels)] print( f"Kept {df.shape[0]} out of {split_df.shape[0]} videos with labels {action_labels}" ) keep_rows = split_df.action_label.isin(action_labels) video_ids = df[keep_rows].video_id.values.tolist() # Arrange as list [{0: img_1_folder_0, 1:img_1_folder_1, ..}, ] grid = [[{ "label": video_id, "type": "video", "path": smthg_root / "videos" / f"{video_id}.webm", }] for video_id in video_ids] with doc: with dtags.article(cls="markdown-body"): htmlgrid.html_grid(grid) if args.destination is not None: with open(args.destination, "w") as f: f.write(doc.render()) print("Write html to {}".format(args.destination)) if args.print: print(doc)
def _render_description(text):
    """Render a role description: '- '-delimited text becomes a <ul>, else a <p>."""
    parts = text.split('- ')
    if len(parts) > 1:
        with ul():
            for para in parts[1:]:
                li(para)
    else:
        p(text)


def profile_to_html(profile_info):
    """Build the profile page <body> from scraped profile data.

    Args:
        profile_info: dict with 'personal_info', 'experiences' (keyed by
            section name) and 'skills' entries.

    Returns:
        The rendered <body> element as a string.
    """
    html_body = body()
    with html_body:
        with section(_class="leading animated fadeInDown"):
            with a(href='/'):
                p(profile_info['personal_info']['name'], _class="leading-bigtext")
            # NOTE(review): summary is truncated by 4 chars — presumably trims
            # a trailing artifact from scraping; confirm against the source.
            p(profile_info['personal_info']['summary'][:-4], _class="leading-text")
        for section_title in ['education', 'jobs', 'volunteering']:
            with section(_class=f"cards animated fadeInUp {section_title}"):
                div(section_title, _class='section-title')
                roles = profile_info['experiences'][section_title]
                for role in roles:
                    # Skip placeholder roles whose fields are all identical/empty.
                    if len(set(role.values())) <= 1:
                        continue
                    with article():
                        with div(_class='cventry'):
                            if section_title == "jobs":
                                with div(_class='entry-header'):
                                    div(role['title'], _class='entry-title')
                                    div(role['date_range'], _class='entry-date')
                                with div(_class="entry-subheader"):
                                    div(role['company'], _class='entry-organisation')
                                    div(role['location'], _class='entry-location')
                                with div(_class='entry-body'):
                                    _render_description(role['description'])
                            elif section_title == "education":
                                with div(_class='entry-header'):
                                    div(role['name'], _class='entry-title')
                                    div(role['date_range'], _class='entry-date')
                                with div(_class="entry-subheader"):
                                    div(role['field_of_study'],
                                        _class='entry-organisation')
                                    with div(_class='entry-location'):
                                        span(role['degree'])
                                        # Separator only when both parts present.
                                        if role['grades'] != '' and role['degree'] != '':
                                            span(', ')
                                        i(f"{role['grades']}", style="font-weight:300")
                                with div(_class='entry-body'):
                                    _render_description(role['description'])
                            elif section_title == "volunteering":
                                with div(_class='entry-header'):
                                    div(role['title'], _class='entry-title')
                                    div(role['date_range'], _class='entry-date')
                                with div(_class="entry-subheader"):
                                    div(role['company'], _class='entry-organisation')
                                    # div(role['cause'], _class='entry-location')
                                with div(_class='entry-body'):
                                    _render_description(role['description'])
        with section(_class="cards animated fadeInDown skills"):
            div('skills', _class='section-title')
            with article():
                for skill in profile_info['skills']:
                    div(skill['name'], _class='skill')
    return str(html_body)