def test_raw():
    """The raw() helper must pass markup through without escaping."""
    from dominate.util import raw

    container = div()
    with container:
        raw('Hello World<br />')
    assert container.render() == '''<div>Hello World<br /></div>'''
def inline_script(base_path, script_path):
    """Read a JS file and wrap its contents in a <script> element.

    A ``// <path>`` banner comment is prepended when *script_path* is
    non-empty; the assembled node is returned.
    """
    banner = "\n// %s\n" % script_path if script_path else ""
    full_path = os.path.join(base_path, script_path)
    with open(full_path) as src, script() as result:
        raw(banner + src.read())
    return result
def __init__(self, *fields, col_args=None, offset=None, **kwargs):
    """Bootstrap 'row' container that lays *fields* out in columns.

    :param fields: form fields to render left-to-right
    :param col_args: optional positional col-sm-N widths, one per field
    :param offset: optional leading empty col-sm-N spacer column
    """
    self.kclass_dep = KClassDep('row')
    self.kstyle = KStyle('padding-bottom: 10px;', 'padding-top:20px;')
    super().__init__(**self.update_kwargs(kwargs))

    def _col(n):
        return 'col-sm-{}'.format(n)

    if offset is not None:
        self.add(div(_class=_col(offset)))
    if col_args is None:
        col_args = ()
    # idiomatic enumerate() replaces the original manual while/counter loop
    for count, field in enumerate(fields):
        # _kwargs get passed to the form group, or to the div for a submit field
        _kwargs = {}
        if count < len(col_args):
            _kwargs['_class'] = _col(col_args[count])
        _kwargs['style'] = 'float:left;'
        if isinstance(field, SubmitField):
            # extra top padding so the button lines up with labelled inputs
            _kwargs['style'] += 'padding-top:25px;'
            self.add(div(raw(field(class_='btn btn-primary')), **_kwargs))
        else:
            self.add(FormGroup(field=field, **_kwargs))
def __init__(self, *args, field=None, **kwargs):
    """Bootstrap 'form-group' wrapper around a single (optional) form field."""
    self.kclass_dep = KClassDep('form-group')
    has_field = field is not None
    if has_field:
        # mirror field state in the group's CSS classes
        if field.errors:
            self.kclass_dep.append('has-error')
        if field.flags.required:
            self.kclass_dep.append('required')
    super().__init__(**self.update_kwargs(kwargs))
    if not has_field:
        return
    self.add(
        raw(str(field.label)),
        br(),
        raw(field(class_='form-control')),
    )
    # one help-block paragraph per validation error
    for error in field.errors:
        self.add(p(error, _class='help-block'))
def visit_Field(self, node):
    """Render a generic form field: label, widget markup, optional help text."""
    # FIXME: add error class
    container = self._get_wrap(node)
    container.add(tags.label(node.label.text, _for=node.id))
    container.add(raw(node()))
    if node.description:
        container.add(tags.p(node.description, _class='form-text text-muted'))
    return container
def __init__(self, form=None, pull_right=False, **kwargs):
    """Render a form's fields as Bootstrap form groups.

    :param form: form instance to render (or None for an empty container)
    :param pull_right: float submit buttons to the right
    """
    super().__init__(**kwargs)
    self.form = form
    self.pull_right = pull_right
    if self.form is None:
        return
    try:
        # not every form provides a hidden/CSRF tag; keep this best-effort,
        # but don't swallow KeyboardInterrupt/SystemExit with a bare except
        self.add(raw(form.hidden_tag()))
    except Exception:
        pass
    for field in self.form:
        input_type = field.widget.input_type
        # BUG FIX: original used `is`/`is not` against string literals --
        # identity of interned strings is implementation dependent and a
        # SyntaxWarning since Python 3.8.  Use equality/membership instead.
        if input_type not in ('hidden', 'submit'):
            self.add(FormGroup(field=field))
        elif input_type == 'submit':
            button_class = 'btn btn-primary'
            if self.pull_right:
                button_class += ' pull-right'
            self.add(raw(field(class_=button_class)))
def raw_script(s):
    """Emit an inline <script> element containing *s* verbatim.

    Returns the raw node so callers outside a dominate ``with`` context can
    still attach it; inside a context the node is added automatically, so
    existing callers that ignore the return value are unaffected.
    """
    return raw(f'<script>{s}</script>')
def palp_geojson(r):
    """Build the minimap <div> (plus inline Leaflet JS) for resource *r*.

    Hidden divs carry GeoJSON payloads that the inline script reads back out
    of the DOM; the script decides client-side whether to show the map.
    Assumes r.spatially_within is a JSON-encoded list — TODO confirm.
    """
    mapdiv = div(id="minimap")
    with mapdiv:
        # resource's own geometry, else first spatially_within geometry, else empty
        innerdiv = div(id="minimap-geojson", style="display:none")
        if bool(r.geojson):
            innerdiv += adjust_geojson(r.geojson)
        elif bool(json.loads(r.spatially_within)):
            innerdiv += adjust_geojson(
                json.loads(r.spatially_within)[0]['geojson'])
        else:
            innerdiv += ''
        # full Pompeii outline, always present for the base fitBounds
        pompeiidiv = div(id="pompeii-geojson", style="display:none")
        pompeiidiv += POMPEII.geojson
        withindiv = div(id="within-geojson", style="display:none")
        if bool(json.loads(r.spatially_within)):
            withindiv += adjust_geojson(
                json.loads(r.spatially_within)[0]['geojson'])
        # map container itself; the script un-hides it when there is data
        div(id="minimapid", style="float:right; width: 40%; height: 400px;display:none")
        s = script(type='text/javascript')
        s += raw(
            """// check if the item-geojson div has content and make a map if it does.
if ($('#minimap-geojson').html().trim()) {
    // fit to resource starts as true, will get set to false if ther eis within_geojson
    var fit_to_resource = true;
    $('#minimapid').show()
    var mymap = L.map('minimapid').setView([40.75, 14.485], 16);
    L.tileLayer('https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}', {
        maxZoom: 19,
        attribution: 'Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community',
        id: 'mapbox.streets'
    }).addTo(mymap);
    var pompeii_geojson = L.geoJSON(JSON.parse($('#pompeii-geojson').html()));
    pompeii_geojson.addTo(mymap);
    mymap.fitBounds(pompeii_geojson.getBounds());
    if ($('#within-geojson').html().trim()) {
        var within_geojson = L.geoJSON(JSON.parse($('#within-geojson').html()), {
            style: {"color":"yellow", "opacity": 0, "fillOpacity": .4},
            onEachFeature: function (feature, layer) {
                var id_no_urn = feature.id;
                id_no_urn = id_no_urn.replace("urn:p-lod:id:","");
                layer.bindPopup('<a href="/browse/'+id_no_urn+'">'+id_no_urn+'</a>');
                //layer.on('click', function (e) {
                //console.log('/browse/'+id_no_urn);
                //window.open('/browse/'+id_no_urn,"_self");
                //});
                layer.bindTooltip(id_no_urn);
            }
        });
        within_geojson.addTo(mymap);
        mymap.fitBounds(within_geojson.getBounds());
        fit_to_resource = false;
    }
    features = L.geoJSON(JSON.parse($('#minimap-geojson').html()), {
        style: {"color":"red", "weight": 1, "fillOpacity":.5},
        onEachFeature: function (feature, layer) {
            var id_no_urn = feature.properties.title;
            id_no_urn = id_no_urn.replace("urn:p-lod:id:","");
            layer.bindPopup('<a href="/browse/'+id_no_urn+'">'+id_no_urn+'</a>');
            //layer.on('click', function (e) {
            //console.log('/browse/'+id_no_urn);
            //window.open('/browse/'+id_no_urn,"_self");
            //});
            layer.bindTooltip(id_no_urn);
        }
    })
    features.addTo(mymap);
    if (fit_to_resource) {
        mymap.fitBounds(features.getBounds());
    }
}""")
    return mapdiv
def list2html(input_list, image_name, image_dir, output_dir, unicode_df=None,
              tesseract_hocr=True, tesseract_text=True, include_image=True,
              feather_x=2, feather_y=2):
    """
    Given an input list, write a corresponding html file. All extractions and
    postprocessing occur in this function.

    :param input_list: List output from xml2list
    :param image_name: Name of image corresponding to page in pdf
    :param image_dir: image directory
    :param output_dir: output directory to write html
    :param unicode_df: Optional dataframe containing unicode information
    :param tesseract_hocr: flag to include tesseract hocr
    :param tesseract_text: flag to include tesseract text
    :param include_image: flag to include cropped images for each hocr
    :param feather_x: x feathering parameter to increase accuracy of ocr
    :param feather_y: y feathering parameter to increase accuracy of ocr
    """
    # image_name[:-4] strips the 3-char extension + dot (assumes e.g. '.png')
    doc = dominate.document(title=image_name[:-4])
    inter_path = os.path.join(output_dir, 'img', image_name[:-4])
    im2latex_model = get_im2latex_model(IM2LATEX_WEIGHT)
    with doc:
        img = Image.open(os.path.join(image_dir, image_name))
        for ind, inp in enumerate(input_list):
            t, coords, score = inp
            width, height = img.size
            # Feather the coords here a bit so we can get better OCR
            ccoords = [
                max(coords[0] - feather_x, 0),
                max(coords[1] - feather_y, 0),
                min(coords[2] + feather_x, width),
                min(coords[3] + feather_y, height)
            ]
            cropped = img.crop(ccoords)
            input_id = str(t) + str(ind)
            hocr = pytesseract.image_to_pdf_or_hocr(
                cropped, extension='hocr').decode('utf-8')
            # Going to run a quick regex to find the body tag
            body = re.search(r'.*<body>(.*)</body>.*', hocr, re.DOTALL)
            b_text = body.group(1)
            d = div(id=input_id, cls=str(t))
            with d:
                if include_image:
                    if not os.path.exists(inter_path):
                        os.makedirs(inter_path)
                    output_img_path = os.path.join(output_dir, 'img')
                    if not os.path.exists(output_img_path):
                        os.makedirs(output_img_path)
                    crop_path = os.path.join(output_img_path, image_name[:-4],
                                             f'{input_id}.png')
                    cropped.save(crop_path)
                    # relative path so the generated html stays relocatable
                    crop_img_path = os.path.join('img', image_name[:-4],
                                                 f'{input_id}.png')
                    dominate.tags.img(src=crop_img_path)
                if b_text and tesseract_hocr:
                    # We do a quick loading and deloading to properly convert encodings
                    div(raw(b_text), cls='hocr',
                        data_coordinates=f'{coords[0]} {coords[1]} {coords[2]} {coords[3]}',
                        data_score=f'{score}')
                loaded = html.fromstring(b_text)
                # Running variable ocr is too expensive right now. We need a better solution for this
                # If we were to run variable ocr over every token, it would go here.
                tree = etree.fromstring(etree.tostring(loaded))
                if unicode_df is not None:
                    match = FILE_NAME_PATTERN.search(image_name)
                    pdf_name = '/input/' + match.group(1)
                    page_num = int(match.group(2))
                    unicode_tree, text, first_id = unicode_representation(
                        unicode_df, page_num, tree, coords, t)
                    # occasionally the class here would be replaced by 'Page Header', cannot figure our why
                    div(raw(etree.tostring(unicode_tree).decode("utf-8")),
                        cls='text_unicode',
                        data_coordinates=f'{coords[0]} {coords[1]} {coords[2]} {coords[3]}',
                        id=str(first_id))
                    div(text, cls='equation_unicode')
    with open(os.path.join(output_dir, f'{image_name[:-4]}.html'), 'w',
              encoding='utf-8') as wf:
        wf.write(doc.render())
import dominate
import pandas as pd
from dominate.tags import *
from dominate.util import raw

# File to write
# NOTE(review): file handle is opened here and presumably closed later in the
# script (outside this chunk) -- confirm.
outputHTML = open("public/projects.html", "w")

# Create sidebar (hand-written markup, injected unescaped)
sidebar = raw("""
<div class="sidebar">
<button class="barlink-item bar-active" onclick="openLink(event, 'projects-programming')"><i class="fas fa-code"></i> Programming</button>
<button class="barlink-item" onclick="openLink(event, 'projects-papers')"><i class="fas fa-scroll"></i> Papers</button>
</div>
""")

# Create outer div
outHTML = div(cls="main-content")
main = div(cls="main-content-previews")

# Create Programming div from the CSV of project posts
file_prog = pd.read_csv("posts/programming.csv")
div_prog = div(id="projects-programming", cls="category fade-anim")
div_prog += h1("Programming")
for index, row in file_prog.iterrows():
    # Parse tags (whitespace-separated in the 'Tags' column)
    tags = str.split(row['Tags'])
    tags_class = ""  # Initialize string of classes for future filtering
    div_tags = ul(cls="tag-list")  # Initialize list of tags
    for tag in tags:
        div_tags += li(tag, cls="tag")
def visit_HiddenField(self, node):
    """Hidden fields render as-is: no label, no wrapper."""
    rendered = node()
    return raw(rendered)
import dominate
import pandas as pd
from dominate.tags import *
from dominate.util import raw
import datetime

# File to write
# NOTE(review): file handle is opened here and presumably closed later in the
# script (outside this chunk) -- confirm.
outputHTML = open("../public/blog.html", "w")

# Create sidebar (hand-written markup, injected unescaped)
sidebar = raw("""
<div class="sidebar" id="sidebar-blog">
<button class="barlink-item bar-active" onclick="openLink(event, 'blog-posts')"><i class="fas fa-pencil-alt"></i> Blog Posts</button>
<button class="barlink-item" onclick="openLink(event, 'blog-books')"><i class="fas fa-book"></i> Books</button>
</div>
""")

# Create outer div
outHTML = div(cls="main-content")
main = div(cls="main-content-previews")

# Create blog post div, post by post; dates reformatted like 'March 01, 2020'
file_blog = pd.read_csv("../posts/blog_posts.csv")
file_blog['Date'] = pd.to_datetime(file_blog['Date'])
file_blog['Date'] = file_blog['Date'].dt.strftime('%B %d, %Y')
div_blog = div(id="blog-posts", cls="category fade-anim")
div_blog += h1("Blog Posts")
for index, row in file_blog.iterrows():
    # Parse tags (whitespace-separated in the 'Tags' column)
    tags = str.split(row['Tags'])
    tags_class = ""  # Initialize string of classes for future filtering
# NOTE(review): this chunk appears to start inside a loop over raw chat
# messages -- the preceding context (message/date/sender/today/chat_list)
# is not visible here; confirm placement against the full script.
text = message.split("\t ")[1].split(" : ")[1]
# collect one message record and append it to the running chat log
# (renamed from `dict`, which shadowed the builtin)
entry = {"time": date, "from": sender, "message": text}
chat_list.append(entry)
#print(entry)

doc = dominate.document(title='Class chat of ' + today.strftime("%d/%m/%Y"))
with doc.head:
    link(rel='stylesheet', href='style.css')
    script(type='text/javascript', src='script.js')

with doc:
    with div(id='content'):
        for msg in chat_list:
            # BUG FIX: a stray bare `s` expression stood here (a NameError at
            # runtime unless `s` happened to be defined earlier); removed.
            with div():
                attr(cls="chat_msg")
                h4(msg["from"])
                raw('<blockquote>' + str(msg["message"]) + '</blockquote>')
                h6(msg["time"])

#print(doc)
# Saves the document in an HTML file (context manager closes it reliably)
with open("chat-" + str(today) + ".html", "w") as html_file:
    html_file.write(str(doc))
def visit_RawTag(self, node):
    """Pass pre-rendered markup through unescaped."""
    content = node.content
    return raw(content)
# NOTE(review): `f` is opened in a `with` block just above this chunk
# (not visible here) -- this reads the CSS stylesheet for the report.
_style = f.read()

header = "Cluster ID", "Number of genes", "Trend"
_title = "Gene expression trends"
doc = dominate.document(title=_title)
with doc.head:
    style(_style)
with doc:
    p(strong(_title))
    with table(id="myTable") as _table:
        _tr = tr()
        _tr.add([th(x) for x in header])
        # one row per cluster: link to cluster page, gene count, trend plot
        for cl in sorted(data_dict.keys()):
            count = data_dict[cl]
            with tr():
                td(
                    raw('<a href=%s target="_blank">%s</a>' % (os.path.join(
                        args.html_dir, "cluster_%d.html" % cl), cl)))
                td(count)
                td(
                    img(src=os.path.join(args.plot_dir, 'trend_c%d.png' % cl),
                        alt="trend",
                        style="width:40%"))
# rendered page goes to stdout (redirected by the caller)
print(doc.render())
#with open(path + ".tsv" , 'w') as f:
    #f.write("%s\n" % "\t".join(header[1:]));
    #for d in data:
        #newd, gene = annotate(d, gene2annotation)
        #f.write("%s\n" % "\t".join(newd));
def make_email(news, watchlist, name, comments, pictures, pic_comments):
    """
    Args:
        news: dataframe with relevant broker news
        watchlist: series of watchlist
        name: the name of the person the email is sent to
        comments: str with general
        pictures: the path of pictures to be included
        pic_comments: the path of the picture comments to be included

    Returns:
        A string with the html to embed in the email
    """
    # pictures and their comments are rendered pairwise below
    assert (len(pictures) == len(pic_comments))
    # make the document
    doc = dominate.document(title='Email')
    # *{
    # font - family: Arial, Helvetica, sans - serif !important;
    # color: black;
    # }
    with doc.head:
        raw("""
        <style>
        * {
        font-family: Arial, Helvetica, sans-serif !important;
        color: black;
        }
        table, th, td {
        padding: 2px;
        border: none;
        /*
        border: 1px solid black;
        border-collapse: collapse;
        */
        }
        </style>
        """)
    with doc:
        # make the title area
        with div():
            h1(name)
            p(comments)
        # make the market news
        with div():
            h4(datetime.now().strftime("%m/%d/%Y"))
            h4("Interest List Broker Notes. Let us know if you need us to track down a note(s) for you"
               )
            with table() as t:
                my_tr = tr()
                # make the header row
                for hl in [
                        'Primary Tickers', 'Headline', 'Broker',
                        'Bloomberg Link'
                ]:
                    my_tr.add(th(hl))
                t.add(my_tr)
                # insert the news data
                # assumes columns: 0=tickers, 1=headline, 2=broker,
                # 3=link text, 4=href -- TODO confirm against caller
                for _, row in news.iterrows():
                    my_tr = tr()
                    my_tr.add(td(row[0]))
                    my_tr.add(td(a(row[1], href=row[4])))
                    my_tr.add(td(row[2]))
                    my_tr.add(td(row[3]))
                    t.add(my_tr)
        # put some cool graphs or something here
        if len(pictures) > 0:
            with div():
                h4("Some other title")
                with table() as t:
                    for j in range(len(pictures)):
                        my_tr = tr()
                        my_tr.add(td(pictures[j]))
                        my_tr.add(td(pic_comments[j]))
                        t.add(my_tr)
        # make the interest list
        with div():
            h4("Your Interest List")
            with table():
                for i in watchlist:
                    tr().add(td(i))
    # [15:] strips the leading '<!DOCTYPE html>' (exactly 15 chars) that
    # dominate prepends -- email bodies want only the fragment
    return str(doc)[15:]
# NOTE(review): chunk of a report script; `doc`, `_style`, `headers`,
# `dtypes`, `peaks`, `cgps_dict`, `args`, `plain_script` come from above.
with doc.head:
    style(_style)
with doc:
    p(strong("AT rich areas"))
    # client-side filter widgets wired to js helpers in plain_script
    input(type="text",
          id="myInput",
          onkeyup="my_search(4)",
          placeholder="Filter relation to CgpS..")
    input(type="number",
          id="myInputGreater",
          onkeyup="my_filter_greater(6)",
          placeholder="Filter AT content greater than..")
    with table(id = "myTable") as _table:
        _tr = tr()
        # sortable column headers: column index + dtype flag for sortTable
        _tr.add([
            th(x[1][0], onclick='sortTable(%d, %d)' % (x[0], x[1][1]))
            for x in enumerate(zip(headers, dtypes))
        ])
        for interval in peaks:
            with tr():
                # 25 bp flank on each side, matching start-25/stop+25 below
                td(raw('<a href=%s target="_blank">ucsc_link</a>' %
                       add_ucsc(interval, args.ucsc, flank=25)))
                td(interval.chrom)
                td(interval.start-25)
                td(interval.stop+25)
                td(cgps_dict[interval.attrs['type']])
                td(interval.attrs['upstream'])
                # scores stored as fractions; render as percentages
                td("%1.1f" % (float(interval.score)*100))
                td("%1.1f" % (float(interval.attrs['at_flank'])*100))
_script = script(type='text/javascript')
_script.add_raw_string(plain_script)
print(doc.render());
# NOTE(review): chunk of a report script; `doc`, `_style`, `labels`,
# `dtypes`, `my_table`, `args` come from above.
with doc.head:
    style(_style)
with open(args.js) as f:
    plain_script = f.read()
with doc:
    p(strong("Mapping Results"))
    with table(id="myTable") as _table:
        _tr = tr()
        # sortable column headers: column index + dtype flag for sortTable
        _tr.add([
            th(x[1][0], onclick='sortTable(%d, %d)' % (x[0], x[1][1]))
            for x in enumerate(zip(labels, dtypes))
        ])
        for el in my_table:
            with tr():
                # first column doubles as a link into the per-sample report
                td(
                    raw('<a href=%s target="_blank">%s</a>' %
                        (os.path.join(el[0], "report.html"), el[0])))
                for my_cell in el[1:]:
                    td(my_cell)
    _script = script(raw(plain_script), type='text/javascript')
# write both the html report and a plain tsv with the same rows
with open(os.path.join(args.outdir, 'report.html'), 'w') as f:
    f.write(doc.render())
with open(os.path.join(args.outdir, 'report.tsv'), 'w') as f:
    f.write("\t".join(labels) + "\n")
    for el in my_table:
        f.write("\t".join(el) + "\n")
def write_swit(self, args):
    """
    write the report for the switches test

    :param args: tpl with input for the report
       (stitle, switches dataframe, endo_its group count, dlink folder)
    """
    stitle, switches, endo_its, dlink = args
    swit_type = ['full', 'sum']
    sw_settings = switches.transpose().to_html(classes='results', bold_rows=False)
    with self.report:
        div(h2(a(stitle, id=stitle.replace(' ', ''))))
        div(p('This summarizes the switch settings'))
        if switches.empty:
            div(p('There are no switches in the model'))
        else:
            raw(sw_settings)
            div(a('Link to the switch settings file',
                  href=r'file:./%s\switch_settings.csv' % dlink))
            div(p('The full switch settings report every possible switch combination. The summarized switch settings '
                  'report the runs where each switch is activated as the only switch.'))
            if endo_its == 0:
                # single model per switch type
                with table(cls='results'):
                    th('Model Type')
                    th('Model')
                    for stype in swit_type:
                        if stype == 'full':
                            name = 'full switch settings'
                        else:
                            name = 'summarized switch settings'
                        with tr():
                            td(name)
                            td(a(img(src=r'file:./%s\%s.gv.svg' % (dlink, stype),
                                     height=self.model_height,
                                     width=self.model_width),
                                 href=r'file:./%s\%s.gv.svg' % (dlink, stype)))
            else:
                # one model column per endogenous group
                with table(cls='results'):
                    th('Model Type')
                    for it in range(endo_its):
                        th('Model, group %s' % it)
                    for stype in swit_type:
                        if stype == 'full':
                            name = 'full switch settings'
                        else:
                            name = 'summarized switch settings'
                        with tr():
                            td(name)
                            for it in range(endo_its):
                                # BUG FIX: original tested `type == 'sum'` --
                                # `type` is the builtin class, so the branch
                                # never ran and the summarized model was
                                # emitted once per group.  Compare the loop
                                # variable `stype` instead.
                                if stype == 'sum':
                                    # summarized model is group-independent:
                                    # show it only for the first group
                                    if it == 0:
                                        td(a(img(src=r'file:./%s\%s.gv.svg' % (dlink, stype),
                                                 height=self.model_height,
                                                 width=self.model_width),
                                             href=r'file:./%s\%s.gv.svg' % (dlink, stype)))
                                else:
                                    td(a(img(src=r'file:./%s\%s_%s.gv.svg' % (dlink, stype, it),
                                             height=self.model_height,
                                             width=self.model_width),
                                         href=r'file:./%s\%s_%s.gv.svg' % (dlink, stype, it)))
        div(a('Link to the switches folder', href=r'file:./%s' % dlink))
def write_ext(self, args):
    """
    write the report for the extreme condition test

    :param args: tpl with input for the report
    """
    stitle, max_mult, endo_its, tbl_lst, tbl_errors, flagged, values, tbl_flagged, dlink = args
    flag_tbl = flagged.to_html(classes='results', bold_rows=False, justify='center')
    tbl_flag_tbl = tbl_flagged.to_html(classes='results', bold_rows=False, justify='center')
    values_tbl = values.to_html(classes='results', bold_rows=False, justify='center')
    # two extremes tested: zero and the maximum multiplier
    ext_lst = [0, max_mult]
    with self.report:
        div(h2(a(stitle, id=stitle.replace(' ', ''))))
        div(p('The following values were used for the parameters in the extreme condition test:'))
        raw(values_tbl)
        div(p('These are the resulting models from the extreme condition test with parameters multiplied'))
        if endo_its == 0:
            with table(cls='results'):
                th('Model Type')
                th('Model')
                for ext in ext_lst:
                    with tr():
                        td('Multiplied by %s' % ext)
                        td(a(img(src=r'file:./%s\mult%s.gv.svg' % (dlink, ext),
                                 height=self.model_height, width=self.model_width),
                             href=r'file:./%s\mult%s.gv.svg' % (dlink, ext)))
        else:
            with table(cls='results'):
                # NOTE(review): this emits a 'Model Type' header once per
                # group (unlike write_swit, which emits it once) -- possibly
                # intended to be outside the loop; confirm against output.
                for it in range(endo_its):
                    th('Model Type')
                    th('Model, group %s' % it)
                for ext in ext_lst:
                    with tr():
                        for it in range(endo_its):
                            td('Multiplied by %s' % ext)
                            td(a(
                                img(src=r'file:./%s\mult%s_%s.gv.svg' % (dlink, ext, it),
                                    height=self.model_height, width=self.model_width),
                                href=r'file:./%s\mult%s_%s.gv.svg' % (dlink, ext, it)))
        div(p('These are the resulting models from the extreme condition test for extreme values in tables'))
        with table(cls='results'):
            th('Table')
            th('Model')
            # tbl[0] is the table id used in filenames, tbl[1] the display name
            for tbl in tbl_lst:
                with tr():
                    td(tbl[1])
                    td(a(
                        img(src=r'file:./%s\table%s.gv.svg' % (dlink, tbl[0]),
                            height=self.model_height, width=self.model_width),
                        href=r'file:./%s\table%s.gv.svg' % (dlink, tbl[0])))
        div(p('The following variables show unexpected behavior'))
        if flagged.empty:
            div(p('No variables show unexpected behavior'))
        else:
            raw(flag_tbl)
        div(p('The following tables had runs that could not be executed'))
        if tbl_flagged.empty:
            div(p('All table runs could be executed'))
        else:
            raw(tbl_flag_tbl)
        div(p('The following formulation errors for the tables in the model have been found'))
        if len(tbl_errors) == 0:
            div(p('No formulation errors have been found'))
        else:
            with table(cls='results'):
                th('Table')
                th('Error')
                for error in tbl_errors:
                    with tr():
                        td(error[3])
                        td(error[1])
        div(a('Link to the extreme condition folder', href=r'file:./%s' % dlink))
def insert_refs(s: str) -> Any:
    """Substitute NL-term references in *s* and return the raw markup node."""
    substituted = nltermsregexp.sub(replacer, s)
    # print(substituted)
    return raw(substituted)
# NOTE(review): this chunk starts mid-script; the enclosing context for this
# script() call (probably `with doc.head:`) is not visible here.
script(type='text/javascript', src='script.js')

# Skeleton.  `header`, `map`, `list` and `footer` shadow builtins / dominate
# tags after first use, but later statements in the script rely on these
# exact names, so they are kept as-is.
header = doc.add(header()).add(div(cls='wrapper'))
map = doc.add(div(cls='map'))
list = doc.add(section(cls='wrapper')).add(div(cls='annotated-list', id='heavens'))
footer = doc.add(footer()).add(div(cls='wrapper'))

# Header
header.add(h1('Fairphone Angels'))
header.add(a('More Info', cls='button btn-info',
             href='https://forum.fairphone.com/t/the-fairphone-angels-program-local-support-by-community-members/33058?u=stefan'))

# Map
map.add(raw("""
<iframe src="https://map.fairphone.community/?show=angels" allowfullscreen="true" frameborder="0">
<p><a href="https://map.fairphone.community/?show=angels" target="_blank">See the Fairphone Community Map!</a></p>
</iframe>
"""))

# List
list.add(raw("""
<input class="search" placeholder="Search">
<button class="sort asc" data-sort="location">Sort by name</button>
<button class="sort" data-sort="country">Sort by country</button>"""))

with list.add(div(cls='list')):
    for heaven in heavens:
        print(heaven.keys())
        # BUG FIX: was `if 'exists' and 'active' in heaven:` which, by
        # operator precedence, only tested `'active' in heaven` (the string
        # literal 'exists' is always truthy).  Test both keys explicitly.
        if 'exists' in heaven and 'active' in heaven:
            with div(cls='heaven'):
                div(heaven['country'], cls='country')
                div(heaven['location'], cls='location')
def l4_name_to_nl(s: str) -> html_tag:
    """Turn an underscore-separated L4 name into a styled natural-language span."""
    readable = s.replace("_", " ")
    return span(raw(readable), cls="defined_term_intro")
def index():
    """Render the searchable amphitheater list page (full HTML string)."""
    # this is the query that gets the triples that will end up in the HTML table.
    result = g.query("""SELECT *
        WHERE { ?id geojson:properties[ rdfs:label ?label ;
                                        ramphsprops:chronogroup ?chronogroup] .
                ?chronogroup rdfs:label ?chronogroupl ;
                             ramphsprops:start-date ?startdate .
          OPTIONAL { ?id geojson:properties[ramphsprops:dimensions [ ramphsprops:arena-major ?arenamajor] ] }
          OPTIONAL { ?id geojson:properties[ramphsprops:dimensions [ ramphsprops:arena-minor ?arenaminor] ] }
          OPTIONAL { ?id geojson:properties[ramphsprops:capacity [ ramphsprops:quantity ?capacity ] ] }
          OPTIONAL { ?id geojson:properties[ramphsprops:dimensions [ ramphsprops:exterior-major ?extmajor] ] }
          OPTIONAL { ?id geojson:properties[ramphsprops:dimensions [ ramphsprops:exterior-minor ?extminor] ] }
          OPTIONAL { ?id geojson:properties[ramphsprops:latintoponym ?latintoponym] }
          OPTIONAL { ?id geojson:properties[ramphsprops:moderncountry ?moderncountry] }
          OPTIONAL { ?id geojson:properties[ramphsprops:province ?province] }
          OPTIONAL { ?id geojson:properties[ramphsprops:region ?region] }
        } ORDER BY ?startdate ?chronogroupl DESC(?capacity)""", initNs=ns)

    # create a DOM and populate the head element
    rdoc = dominate.document(title="Searchable List of Roman Amphitheaters")
    rdoc.head += meta(charset="utf-8")
    rdoc.head += meta(http_equiv="X-UA-Compatible", content="IE=edge")
    rdoc.head += meta(name="viewport", content="width=device-width, initial-scale=1")
    # stylesheets
    rdoc.head += link(
        rel="stylesheet",
        href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css")
    rdoc.head += link(
        rel="stylesheet",
        href="https://cdn.datatables.net/1.10.15/css/dataTables.bootstrap.min.css")
    # js libraries
    rdoc.head += comment(local_source)
    rdoc.head += script(src="https://code.jquery.com/jquery-2.2.4.min.js")
    rdoc.head += script(
        src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js")
    rdoc.head += script(
        src="https://cdn.datatables.net/1.10.15/js/jquery.dataTables.min.js")
    rdoc.head += script(
        src="https://cdn.datatables.net/1.10.15/js/dataTables.bootstrap.min.js")
    # This is the js that initiaties Datatables when the page is loaded. Now inside a python multi-line string
    rdoc.head += script(
        raw("""$(document).ready(function() {
            var table = $('#ramphs').DataTable( {
                initComplete: function () {
                    this.api().columns([3,4,5]).every( function () {
                        var column = this;
                        var select = $('<select><option value=""></option></select>')
                            .appendTo( $(column.footer()).empty() )
                            .on( 'change', function () {
                                var val = $.fn.dataTable.util.escapeRegex(
                                    $(this).val()
                                );
                                column
                                    .search( val ? '^'+val+'$' : '', true, false )
                                    .draw();
                            } );
                        column.data().unique().sort().each( function ( d, j ) {
                            select.append( '<option value="'+d+'">'+d+'</option>' )
                        } );
                    } );
                },
                "columnDefs": [ { "searchable": false, "orderable": false, "targets": 0 } ],
                'paging': false
            } );
            table.on( 'order.dt search.dt', function () {
                table.column(0, {search:'applied', order:'applied'}).nodes().each( function (cell, i) {
                    cell.innerHTML = i+1;
                } );
            } ).draw();
        } );"""))
    with rdoc:
        with div(cls="container"):
            h1("Searchable List of Roman Amphitheaters")
            with p():
                span("See ")
                a("http://github.com/sfsheath/roman-amphitheaters",
                  href="http://github.com/sfsheath/roman-amphitheaters")
                span(" for much more data and overview of project.")
            # for now I'm rendering direct to an HTML table. Could use JSON but with only 250+ records,
            # rendering speed doesn't seem to be a problem.
            with table(id="ramphs"):
                with thead():
                    with tr():
                        th("")
                        th("Label")
                        th("Latin Toponym")
                        th("Country")
                        th("Region or Province")
                        th("Period")
                        th("Capacity")
                        th("Ext. Major")
                        th("Ext. Minor")
                        th("Arena Major")
                        th("Arena Minor")
                with tfoot():
                    # footer cells for the three columns DataTables turns
                    # into select filters (columns 3,4,5 above)
                    th("")
                    th("")
                    th("")
                    th("Country")
                    th("Region or Province")
                    th("Period")
                with tbody():
                    # rdflib returns 'None' as the *string* repr of unbound
                    # optionals, hence the str(...) != 'None' tests below
                    for r in result:
                        with tr():
                            td("")
                            td(
                                a(str(r.label),
                                  href="/ramphs/id/{}".format(
                                      str(r.id).replace(
                                          'http://purl.org/roman-amphitheaters/resource/',
                                          ''))))
                            # td(str(r.label))
                            if str(r.latintoponym) != 'None':
                                td(str(r.latintoponym))
                            else:
                                td("")
                            if str(r.moderncountry) != 'None':
                                td(str(r.moderncountry))
                            else:
                                td("")
                            # combine province / region into single column since no amphitheater is both
                            if str(r.region) != 'None':
                                td(
                                    str(r.region).replace(
                                        'http://purl.org/roman-amphitheaters/resource/',
                                        ''))
                            elif str(r.province) != 'None':
                                td(
                                    str(r.province).replace(
                                        'http://purl.org/roman-amphitheaters/resource/',
                                        ''))
                            else:
                                td("")
                            if str(r.chronogroupl) != 'None':
                                if str(r.startdate) != 'None':
                                    td(str(r.chronogroupl),
                                       data_sort=str(r.startdate))
                                else:
                                    td(str(r.chronogroupl))
                            else:
                                td("")
                            if str(r.capacity) != 'None':
                                td(str(r.capacity))
                            else:
                                td("")
                            if str(r.extmajor) != 'None':
                                td(str(r.extmajor))
                            else:
                                td("")
                            if str(r.extminor) != 'None':
                                td(str(r.extminor))
                            else:
                                td("")
                            if str(r.arenamajor) != 'None':
                                td(str(r.arenamajor))
                            else:
                                td("")
                            if str(r.arenaminor) != 'None':
                                td(str(r.arenaminor))
                            else:
                                td("")
            with p():
                span("See ")
                a("https://github.com/sfsheath/roman-amphitheaters-heroku",
                  href="https://github.com/sfsheath/roman-amphitheaters-heroku")
                span(" for python code that generates this page.")
    return rdoc.render()
def write_qc(adata_unfiltered, adata_filtered, version, analysis_name,
             standard_min_genes, standard_min_cells, standard_min_counts,
             standard_percent_mito, standard_max_counts, standard_n_genes,
             filtering_output1, filtering_output2, results_folder, css_path):
    """generates the first part of the documentation

    Writes an HTML QC report comparing the unfiltered and filtered AnnData
    objects; figures are referenced from ./figures/ relative to the report.
    """
    adata = adata_unfiltered
    copyfile(css_path, join(results_folder, 'style.css'))
    # part one before filtering
    doc = document(title='QC Report - ' + analysis_name)
    with doc.head:
        link(rel='stylesheet', href='style.css')
    with doc:
        with div():
            attr(cls='body')
            h1('Quality Control Report - Single Cell Analysis Standard Pipeline')
            with p(style="font-size:12px"):
                text('single cell analysis standard workflow version ' + str(version))
                br()
                text('analysis name: ' + str(analysis_name))
                br()
                text('report generated on: ' + str(date.today()))
            h2('Raw Dataset Properties')
            # generate a table
            with table():
                attr(cls='minimalistBlack')
                l = tr()
                l += th('total number of cells')
                l += td(adata.n_obs)
                l = tr()
                l += th('number of genes')
                l += td(adata.n_vars)
                if 'donor' in adata.obs.columns.tolist():
                    l = tr()
                    l += th('number of donors')
                    l += td(len(adata.obs.donor.value_counts().index.tolist()))
                if 'condition' in adata.obs.columns.tolist():
                    l = tr()
                    l += th('number of conditions')
                    l += td(
                        len(adata.obs.condition.value_counts().index.tolist()))
                if 'treatment' in adata.obs.columns.tolist():
                    l = tr()
                    l += th('treatments')
                    # NOTE(review): reads .donor although this branch tests
                    # 'treatment' -- looks like a copy/paste slip; confirm.
                    l += td(adata.obs.donor.value_counts().index.tolist())
            br()
            img(src='./figures/transcriptcaptureefficiency.png', width='500px')
            br()
            img(src='./figures/librarysize.png', width='500px')
            br()
            h2('Filtering')
            with div():
                attr(style='float: left')
                with table():
                    attr(cls='minimalistBlack2', style='float: left')
                    with thead():
                        l = tr()
                        l += th('used filtering parameters', colspan=2)
                    with tbody():
                        l = tr()
                        l += th('min. genes per cell: ')
                        l += td(standard_min_genes)
                        l = tr()
                        l += th('min. UMI counts per cell: ')
                        l += td(standard_min_counts)
                        l = tr()
                        l += th('max. genes per cell: ')
                        l += td(standard_n_genes)
                        l = tr()
                        l += th('max. UMI counts per cell: ')
                        l += td(standard_max_counts)
                        l = tr()
                        l += th('max. mitochondrial gene content: ')
                        l += td(standard_percent_mito)
                        l = tr()
                        l += th('min. cells expressing a gene: ')
                        l += td(standard_min_cells)
                    with tfoot():
                        l = tr()
                        l += th('', colspan=2)
            with div():
                attr(style='float: left')
                # verbatim stdout of the two filtering steps, line by line
                with p(style="font-size:10px"):
                    for x in filtering_output1.stdout.split('\n'):
                        text(x)
                        br()
                    for x in filtering_output2.stdout.split('\n'):
                        text(x)
                        br()
            with div(style='clear: both'):
                h4('visualization of filtering thresholds')
                img(src='./figures/filtering_thresholds.png', width='500px')
                br()
                with table():
                    attr(cls='minimalistBlack', style='float: none')
                    with thead():
                        l = tr()
                        l += th('')
                        l += th('number of cells before filtering')
                        l += th('number of cells after filtering')
                    with tbody():
                        l = tr()
                        l += th('total cells')
                        l += td(adata.n_obs)
                        # the row objects are kept in dicts so the
                        # "after filtering" column can be appended below
                        if 'donor' in adata.obs.columns.tolist():
                            table_donor = {}
                            for donor in adata.obs.donor.value_counts().index.tolist():
                                subset_cells = adata[adata.obs.donor == donor, :].copy().n_obs
                                table_donor[donor] = tr()
                                table_donor[donor] += th(donor)
                                table_donor[donor] += td(subset_cells)
                        if 'condition' in adata.obs.columns.tolist():
                            table_condition = {}
                            for condition in adata.obs.condition.value_counts().index.tolist():
                                subset_cells = adata[adata.obs.condition == condition, :].copy().n_obs
                                table_condition[condition] = tr()
                                table_condition[condition] += th(condition)
                                table_condition[condition] += td(subset_cells)
                        if 'treatment' in adata.obs.columns.tolist():
                            table_treatment = {}
                            for treatment in adata.obs.treatment.value_counts().index.tolist():
                                subset_cells = adata[adata.obs.treatment == treatment, :].copy().n_obs
                                table_treatment[treatment] = tr()
                                table_treatment[treatment] += th(treatment)
                                table_treatment[treatment] += td(subset_cells)
                    with tfoot():
                        foot = tr()
                        foot += th('')
                        foot += th('')
                        foot += th('')
                br()
    # part two after filtering: append the post-filter counts to the rows
    # created above (the tr objects are still referenced by the dicts)
    adata = adata_filtered
    # add number of cells
    l += td(adata.n_obs)
    if 'donor' in adata.obs.columns.tolist():
        for donor in adata.obs.donor.value_counts().index.tolist():
            subset_cells = adata[adata.obs.donor == donor, :].copy().n_obs
            table_donor[donor] += td(subset_cells)
    if 'condition' in adata.obs.columns.tolist():
        for condition in adata.obs.condition.value_counts().index.tolist():
            subset_cells = adata[adata.obs.condition == condition, :].copy().n_obs
            table_condition[condition] += td(subset_cells)
    if 'treatment' in adata.obs.columns.tolist():
        for treatment in adata.obs.treatment.value_counts().index.tolist():
            subset_cells = adata[adata.obs.treatment == treatment, :].copy().n_obs
            table_treatment[treatment] += td(subset_cells)
    # create table overview of the used filtering parameters
    with doc:
        with div(style='clear: both'):
            h2('Filtered Dataset Properties')
            img(src='./figures/violin.after_filtering.png', width='500px')
            br()
            img(src='./figures/top_genes.png', width='500px')
            topn = top_counts_genes(adata=get_raw(adata), top_n=10)
            raw(topn.to_html(classes='', header=True, index=False))
            h2('Highly Variable Gene Selection')
            img(src='./figures/filter_genes_dispersion.hvg.png', width='500px')
            h2('Principle Component Analysis')
            br()
            img(src='./figures/PCA.png', width='500px')
            br()
            h2('Clustering')
            img(src='./figures/umap.louvain.png', width='300px')
    with open(join(results_folder, 'qc_report.html'), 'w') as f:
        f.write(doc.render())
    #this commented out code is functional but was removed from besca because we did not manage to include weasybuild in the easybuild module
    #convert to a pdf
    #HTML(join(results_folder, 'qc_report.html')).write_pdf(join(results_folder, 'qc_report.pdf'), presentational_hints=True)
    #remove copied css.style
    #remove(join(results_folder, 'style.css'))
    #remove(join(results_folder, 'qc_report.html'))
    return (None)
onkeyup='my_filter_greater(13, "inp4", "myTable")', placeholder="Filter top coverage content greater than..") input(type="number", id="inp5", onkeyup='my_filter_lesser_abs(9, "inp5", "myTable")', placeholder="Filter distance to TSS closer than..") with table(id="myTable") as _table: _tr = tr() _tr.add([ th(x[1][0], onclick='sortTable(%d, %d)' % (x[0], x[1][1])) for x in enumerate(zip(header, dtypes)) ]) for peak in peaks: with tr(): td( raw('<a href=%s target="_blank">ucsc_link</a>' % add_ucsc(peak, args.ucsc, flank=25, chr_dict=chrdict))) for entry in flatten_peak(peak): td(entry) _script = script(raw(plain_script), type='text/javascript') with open(os.path.join(args.outdir, 'peaks.html'), 'w') as f: #print(doc.render()) f.write(doc.render()) with open(os.path.join(args.outdir, 'peaks.tsv'), 'w') as f: f.write("%s\n" % "\t".join(header[1:])) for peak in peaks: f.write("%s\n" % "\t".join([str(x) for x in flatten_peak(peak)]))
def create_html_string(COUNTERS, BALLOTLISTS, DISAGREED_INFO_DICT):
    """Build the vote-records summary page and return the dominate document.

    Args:
        COUNTERS: tallies read here: 'ballots_processed', 'styles_detected',
            'matched_ballots', 'non_matched_ballots', 'blank_ballots',
            'overvoted_ballots', 'disagreed_ballots'.
        BALLOTLISTS: ballot-id lists read here: 'overvoted_ballots',
            'blank_ballots', 'disagreed_ballots'.
        DISAGREED_INFO_DICT: keyed by ballot_id; value is rendered verbatim
            inside a <pre> block in the disagreements table.

    Also reads module-level STYLES, VOTES_RESULTS, DB, config_dict and utils.
    Returns:
        The dominate ``document``; the caller is responsible for rendering it.
    """
    script_abs_path = os.path.abspath('assets/copy_to_clipboard.js')
    version = utils.show_version()
    doc = dominate.document(title='Audit Engine version: ' + version)
    with doc.head:
        link(
            rel='stylesheet',
            href=
            'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css',
            integrity=
            "sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T",
            crossorigin="anonymous",
        )
        script(type='text/javascript', src=script_abs_path)
    with doc:
        with div(cls='container'):
            with div(cls='jumbotron'):
                h1('Audit Engine: {version} - vote records summary'.format(
                    version=version))
                build_time = datetime.datetime.now(datetime.timezone.utc)
                p(f'Summary built at: {build_time.strftime("%Y-%m-%d %H:%M:%S")}',
                  cls='lead')
            # Top-level counters table.
            with table(cls='table table-striped'):
                with tbody():
                    with tr():
                        th('Number of ballots processed')
                        td(COUNTERS['ballots_processed'])
                    with tr():
                        th('Number of different ballot types')
                        td(COUNTERS['styles_detected'])
                    with tr():
                        th('Number of ballots matching the CVR results')
                        td(COUNTERS['matched_ballots'])
                    with tr():
                        th('Number of ballots not matching the CVR results')
                        td(COUNTERS['non_matched_ballots'])
                    with tr():
                        th('Number of completely blank ballots')
                        td(COUNTERS['blank_ballots'])
                    with tr():
                        th('Number of overvotes')
                        td(COUNTERS['overvoted_ballots'])
                    with tr():
                        th('Number of disagreements')
                        td(COUNTERS['disagreed_ballots'])
            # One row per detected ballot style, linking to its summary page.
            with div(cls='my-4'):
                h2('Styles')
                with table(cls='table table-striped'):
                    with thead():
                        with tr():
                            th('Style code', scope="col")
                            th('Style number', scope="col")
                            th('Based on number of ballots', scope="col")
                            th('Built at', scope="col")
                    with tbody():
                        for style in STYLES:
                            with tr():
                                utc_time = datetime.datetime.utcfromtimestamp(
                                    style.timestamp)
                                style_summary = glob.glob(
                                    f"{config_dict['RESOURCES_PATH']}{config_dict['STYLES_PATHFRAG']}{style.code}.html"
                                )[0]
                                td(
                                    a(style.style_num,
                                      href=os.path.realpath(style_summary),
                                      target="_blank"))
                                td(style.number)
                                td(style.build_from_count)
                                td(f'{utc_time.strftime("%Y-%m-%d %H:%M:%S")}')
            # Tables with contests results:
            with div(cls='my-4'):
                h2('Contests results')
                for result_contest in VOTES_RESULTS:
                    contest_name = result_contest['contest_name']
                    selections = result_contest['selections']
                    vote_for = result_contest['vote_for']
                    question = result_contest['question']
                    with div(cls='my-4'):
                        h5(f'Contest results "{contest_name}" (vote for {vote_for}):'
                           )
                        if question:
                            h6(f'Question "{question}"')
                        with table(cls='table table-striped'):
                            with thead():
                                with tr():
                                    th('#', scope="col")
                                    th('Candidate', scope="col")
                                    th('Votes', scope="col")
                                    th('%', scope="col")
                            with tbody():
                                for index, candidate_name in enumerate(
                                        sort_option_names(selections.keys())):
                                    try:
                                        total_votes = result_contest['total_votes']
                                        percent = round(
                                            (selections[candidate_name] /
                                             total_votes) * 100, 2)
                                    except ZeroDivisionError:
                                        # No votes at all in this contest.
                                        percent = 0.0
                                    with tr():
                                        th(index + 1, scope="row")
                                        td(candidate_name)
                                        # BUG FIX: the 'Votes' column previously
                                        # repeated the candidate name; show the
                                        # vote count instead.
                                        td(selections[candidate_name])
                                        td(f'{percent}%')
                        with table(cls='table table-striped'):
                            with tbody():
                                with tr():
                                    th('Total number of ballots')
                                    td(result_contest['total_ballots'])
                                with tr():
                                    th('Number of votes')
                                    td(result_contest['total_votes'])
                                with tr():
                                    th('Number of undervotes')
                                    td(result_contest['undervote'])
                                with tr():
                                    th('Number of overvotes')
                                    td(result_contest['overvote'])
            # Table with overvotes:
            with div(cls='my-4'):
                h2('Ballots with overvotes:')
                with table(cls='table table-striped'):
                    with thead():
                        with tr():
                            th('#', scope="col")
                            th('Precinct / Contest name', scope="col")
                            th('Ballot file / Ballot and CVR status', scope="col")
                            th('Overvotes / Contest validation status', scope="col")
                    with tbody():
                        dirpath = DB.dirpath_from_dirname('overvotes')
                        for index, ballot_id in enumerate(
                                BALLOTLISTS['overvoted_ballots']):
                            filepathlist = glob.glob(
                                f"{dirpath}**/{ballot_id}i.pdf", recursive=True)
                            if not filepathlist:
                                # Ballot image not found on disk; skip the row.
                                continue
                            filepath = filepathlist[0]
                            with tr():
                                th(index + 1, scope="row")
                                td('')
                                with td():
                                    ballot_image_filepath = os.path.abspath(
                                        filepath)
                                    a(ballot_id,
                                      href=ballot_image_filepath,
                                      target="_blank")
                                td('')
            # Table with blank ballots:
            with div(cls='my-4'):
                h2('Blank Ballots:')
                with table(cls='table table-striped'):
                    with thead():
                        with tr():
                            th('#', scope="col")
                            th('Precinct / Contest name', scope="col")
                            th('Ballot file / Ballot and CVR status', scope="col")
                            th('Overvotes / Contest validation status', scope="col")
                    with tbody():
                        dirpath = DB.dirpath_from_dirname('blank_ballots')
                        for index, ballot_id in enumerate(
                                BALLOTLISTS['blank_ballots']):
                            # BUG FIX: pattern was f"f{dirpath}{ballot_id}i.pdf" —
                            # a stray literal 'f' and no '**/' component, so the
                            # recursive glob could never match. Now consistent
                            # with the overvotes/disagreements sections.
                            filepathlist = glob.glob(
                                f"{dirpath}**/{ballot_id}i.pdf", recursive=True)
                            if not filepathlist:
                                continue
                            filepath = filepathlist[0]
                            with tr():
                                th(index + 1, scope="row")
                                td('')
                                with td():
                                    ballot_image_filepath = os.path.abspath(
                                        filepath)
                                    a(ballot_id,
                                      href=ballot_image_filepath,
                                      target="_blank")
                                td('')
            # Table with disagreements:
            with div(cls='my-4'):
                h2('Ballots with disagreements:')
                with table(cls='table table-striped'):
                    with thead():
                        with tr():
                            th('#', scope="col")
                            th('Ballot file', scope="col")
                            th('Disagreement Details', scope="col")
                    with tbody():
                        dirpath = DB.dirpath_from_dirname('disagreements')
                        for index, ballot_id in enumerate(
                                BALLOTLISTS['disagreed_ballots']):
                            filepathlist = glob.glob(
                                f"{dirpath}**/{ballot_id}i.pdf", recursive=True)
                            if not filepathlist:
                                continue
                            filepath = filepathlist[0]
                            with tr():
                                th(index + 1, scope="row")
                                with td():
                                    ballot_image_filepath = os.path.abspath(
                                        filepath)
                                    a(ballot_id,
                                      href=ballot_image_filepath,
                                      target="_blank")
                                td(
                                    raw(f"<pre>{DISAGREED_INFO_DICT[ballot_id]}</PRE>"
                                        ))
    return doc
def seed_to_html(hsp, seed, aln_marks, _style): organism, uniprot, tq_start, tq_end = hsp.query.id.split(":") name = "_".join((organism, uniprot)) h_start = hsp.hit_start + seed[0]; h_end = hsp.hit_start + seed[1]; q_start = hsp.query_start + seed[0] + int(tq_start); q_end = hsp.query_start + seed[1] + int(tq_start); #print(hsp.query.id) #_ , uniprot, name = hsp.hit.id.split("|") doc = dominate.document(title="Query: %s Hit: %s Hit Range: %d-%d" % (hsp.query.id, name, h_start, h_end) ) with doc.head: style(_style) with doc: with p(): b("Query: ") raw(hsp.query.id) b("Range: ") raw("%d-%d" % (q_start, q_end) ) with p(): b("Hit: ") raw('<a href=%s target="_blank">%s</a>' % (add_uniprot_ref(name), hsp.hit.id) ) b("Range: ") raw("%d-%d" % (h_start, h_end) ) with p(): b("Score: ") raw("%1.1f" % seed[2]) with p(): b("Identity: ") raw("\t%1.2f%%" % (100*identity)) with p(): b("Alignment:") seq1 = str(hsp.aln[0].seq).upper()[seed[0]:seed[1]] seq2 = str(hsp.aln[1].seq).upper()[seed[0]:seed[1]] with p(): ugly_fonts = [[], []] aln_line = '' for start, stop, tclass in aln_marks: s = '<font color=%s>%s</font>' % (colors[tclass], seq1[start:stop]) ugly_fonts[0].append(s); s = '<font color=%s>%s</font>' % (colors[tclass], seq2[start:stop]) ugly_fonts[1].append(s); aln_line += aln_signs[tclass]*(stop-start) raw(''.join(ugly_fonts[0])) br() font(aln_line, color='black') br() raw(''.join(ugly_fonts[1])) fname = "%s_%s_%d_%d.html" % (name, hsp.hit.id, q_start, q_end) fpath = os.path.join(LOCAL_HTML_PATH, fname) with open(fpath, 'w') as f: f.write(doc.render()) return name, hsp.query.id, hsp.hit.id, fname, q_start, q_end, h_start, h_end, identity, seed[2]
def _company(self): company = self._toml['company'] assert isinstance(company, str) td(raw(company), colspan=3, class_name='value')
gene_level_res = [] header = "Query", "Hit", "Link", "q_start", "q_end", "h_start", "h_end", "Identity [%]", "Score" for name, local_list in res_dict.items(): gene_level_res.append(( name, len(local_list), sum([x[-1] for x in local_list]) )) local_list.sort(key = lambda x: x[-1], reverse = True) with open(os.path.join(LOCAL_GENES_PATH, '%s.html' % name), 'w') as f: doc = dominate.document(title="%s: similarity blastp predictions" % os.path.basename(args.outdir)) with doc.head: style(_style) with doc: with table(id = "myTable") as _table: _tr = tr() _tr.add([td(x) for x in header]) for query, hit, fname, q_start, q_end, h_start, h_end, identity, score in local_list: with tr(): td(raw('<a href=%s target="_blank">%s</a>' % (add_uniprot_ref(name), name) )) td(hit) td(raw('<a href=%s target="_blank">link</a>' % os.path.join('../local', fname) )) td(q_start) td(q_end) td(h_start) td(h_end) td("%1.2f" % (100*identity)) td("%1.1f" % score) f.write(doc.render()) header = "gene name", "link to hits", "number of hits", "total score" gene_level_res.sort(key = lambda x: x[-1], reverse = True) doc = dominate.document(title="%s: Human genes with counterparts" % os.path.basename(args.outdir))
def index_dot_html(rex_dir: str | Path) -> None:
    """Write ``index.html`` summarizing the tW fit found in *rex_dir*.

    If the ``matplotlib`` plot directory is missing, the plots are first
    produced by shelling out to the ``tdub`` CLI (blocking calls). The page
    collects pre/post-fit stack plots, grouped uncertainty-impact tables and
    per-region kinematic/BDT-input images via ``add_images``.

    Args:
        rex_dir: path to the rex fit directory (str or Path).
    """
    rex_dir = Path(rex_dir)
    doc = document(title="tW Fit")
    with doc.head:
        raw(CSS)
    mpl_dir = rex_dir / "matplotlib"
    if not mpl_dir.exists():
        # Plots are missing: generate them (each call waits for completion).
        subprocess.Popen(
            f"tdub rex stacks --no-chisq --no-internal --png {rex_dir}", shell=True
        ).wait()
        subprocess.Popen(
            f"python3 -m tdub.internal.cramped {rex_dir}", shell=True
        ).wait()
    # NOTE: removed a dead `img_pairs` loop over VRP plots — the list was
    # built but never used anywhere in this function.
    # (pre-fit, post-fit) path pairs; PNGs are shown inline, PDFs behind links.
    allrs_png_pair = ("matplotlib/allregions_pre.png", "matplotlib/allregions_post.png")
    r1j1b_png_pair = ("matplotlib/reg1j1b_preFit.png", "matplotlib/reg1j1b_postFit.png")
    r2j1b_png_pair = ("matplotlib/reg2j1b_preFit.png", "matplotlib/reg2j1b_postFit.png")
    r2j2b_png_pair = ("matplotlib/reg2j2b_preFit.png", "matplotlib/reg2j2b_postFit.png")
    allrs_pdf_pair = ("matplotlib/allregions_pre.pdf", "matplotlib/allregions_post.pdf")
    r1j1b_pdf_pair = ("matplotlib/reg1j1b_preFit.pdf", "matplotlib/reg1j1b_postFit.pdf")
    r2j1b_pdf_pair = ("matplotlib/reg2j1b_preFit.pdf", "matplotlib/reg2j1b_postFit.pdf")
    r2j2b_pdf_pair = ("matplotlib/reg2j2b_preFit.pdf", "matplotlib/reg2j2b_postFit.pdf")
    with doc:
        h1("Latest tW Fit")
        p("Generated {}".format(datetime.now().strftime("%b-%d-%Y %H:%M:%S")))
        with p():
            a("INT note link", href="https://cds.cern.ch/record/2667560")
        h2("Main Plots")
        p("Click images for PDF versions.")
        p("Prefit individuals")
        with div(cls="row"):
            with a(href=str(r1j1b_pdf_pair[0])):
                img(src=str(r1j1b_png_pair[0]), width=r"250px")
            with a(href=str(r2j1b_pdf_pair[0])):
                img(src=str(r2j1b_png_pair[0]), width=r"250px")
            with a(href=str(r2j2b_pdf_pair[0])):
                img(src=str(r2j2b_png_pair[0]), width=r"250px")
        p("Prefit combined")
        with div(cls="row"):
            with a(href=str(allrs_pdf_pair[0])):
                img(src=str(allrs_png_pair[0]), cls="marginauto")
        p("Postfit individuals")
        with div(cls="row"):
            with a(href=str(r1j1b_pdf_pair[1])):
                img(src=str(r1j1b_png_pair[1]), width=r"250px")
            with a(href=str(r2j1b_pdf_pair[1])):
                img(src=str(r2j1b_png_pair[1]), width=r"250px")
            with a(href=str(r2j2b_pdf_pair[1])):
                img(src=str(r2j2b_png_pair[1]), width=r"250px")
        p("Postfit combined")
        with div(cls="row"):
            with a(href=str(allrs_pdf_pair[1])):
                img(src=str(allrs_png_pair[1]), cls="marginauto")
        h2("Grouped Uncertainty Impacts (alphabetical then descending)")
        with div():
            p(raw(grouped_impacts_table(rex_dir, descending=False, tablefmt="html")))
            p(raw(grouped_impacts_table(rex_dir, descending=True, tablefmt="html")))
        h2("Other Plots 1j1b")
        with div() as d:
            add_images(
                d,
                ["pT_lep1", "pT_lep2", "pT_jet1", "met"],
                "1j1b",
                "pre",
                t="1j1b pre-fit kinematics",
            )
            add_images(
                d,
                ["pT_lep1", "pT_lep2", "pT_jet1", "met"],
                "1j1b",
                "post",
                t="1j1b post-fit kinematics",
            )
            add_images(
                d,
                ["pTsys_lep1lep2jet1met", "pT_jetS1", "cent_lep1lep2"],
                "1j1b",
                "pre",
                t="Top 3 1j1b BDT inputs pre-fit",
            )
            add_images(
                d,
                ["pTsys_lep1lep2jet1met", "pT_jetS1", "cent_lep1lep2"],
                "1j1b",
                "post",
                t="Top 3 1j1b BDT inputs post-fit",
            )
        h2("Other Plots 2j1b")
        with div() as d:
            add_images(
                d, ["pT_lep1", "pT_lep2"], "2j1b", "pre", t="2j1b pre-fit kinematics"
            )
            add_images(d, ["pT_jet1", "pT_jet2", "met"], "2j1b", "pre")
            add_images(
                d, ["pT_lep1", "pT_lep2"], "2j1b", "post", t="2j1b post-fit kinematics"
            )
            add_images(d, ["pT_jet1", "pT_jet2", "met"], "2j1b", "post")
            add_images(
                d,
                ["mass_lep1jet2", "mass_lep1jet1", "pTsys_lep1lep2jet1met"],
                "2j1b",
                "pre",
                t="Top 3 2j1b BDT inputs pre-fit",
            )
            add_images(
                d,
                ["mass_lep1jet2", "mass_lep1jet1", "pTsys_lep1lep2jet1met"],
                "2j1b",
                "post",
                t="Top 3 2j1b BDT inputs post-fit",
            )
        # FIX (consistency): heading was lowercase "other Plots 2j2b".
        h2("Other Plots 2j2b")
        with div() as d:
            add_images(
                d, ["pT_lep1", "pT_lep2"], "2j2b", "pre", t="2j2b pre-fit kinematics"
            )
            add_images(d, ["pT_jet1", "pT_jet2", "met"], "2j2b", "pre")
            add_images(
                d, ["pT_lep1", "pT_lep2"], "2j2b", "post", t="2j2b post-fit kinematics"
            )
            add_images(d, ["pT_jet1", "pT_jet2", "met"], "2j2b", "post")
            add_images(
                d,
                ["mass_lep1jet1", "mass_lep1jet2", "pT_jet2"],
                "2j2b",
                "pre",
                t="Top 3 2j2b BDT inputs pre-fit",
            )
            add_images(
                d,
                ["mass_lep1jet1", "mass_lep1jet2", "pT_jet2"],
                "2j2b",
                "post",
                t="Top 3 2j2b BDT inputs post-fit",
            )
    # rex_dir is already a Path here (re-wrapping it was redundant).
    with open(rex_dir / "index.html", "w") as f:
        print(doc, file=f)
def generate_html(global_summaries):
    """Render RSS summaries to 'summaries.html' with top noun phrases highlighted.

    Parameters:
        global_summaries: mapping of summarizer name -> (summaries, meta), where
            meta maps each summary text to (site, url) — inferred from the
            indexing below; verify against the caller.

    Side effects: prints progress and writes ./summaries.html.
    """
    print("----------------------------------------------------------------")
    print("\nGENERATING HTML")
    doc = dominate.document(title='RSS Summaries')
    with doc:
        for summary, vals in global_summaries.items():
            # Count noun-phrase frequencies across all summary texts.
            dic = {}
            for e in vals[0]:
                blob = TextBlob(e)
                for l in blob.noun_phrases:
                    if l not in dic:
                        dic[l] = 1
                    else:
                        dic[l] = dic[l] + 1
            order_phrase = sorted(dic.items(), key=itemgetter(1), reverse=True)
            correlated_terms = union_similiar_terms(dic)
            highlight = 3
            # Merge counts of terms correlated with the current top phrases.
            # NOTE(review): mutates `dic` while ranking; the paired assignments
            # make both correlated terms share the merged count.
            for e in order_phrase[:highlight]:
                for x in correlated_terms:
                    if e[0] in x:
                        # Pick the other member of the correlated pair.
                        if x.index(e[0]) == 0:
                            search_for = x[1]
                        else:
                            search_for = x[0]
                        dic[search_for] = dic[search_for] + dic[e[0]]
                        dic[e[0]] = dic[search_for]
                        # NOTE(review): KeyError possible if `search_for` is not
                        # in `dic` — presumably union_similiar_terms only pairs
                        # existing keys; confirm.
            order_phrase = sorted(dic.items(), key=itemgetter(1), reverse=True)
            top = []
            for e in order_phrase[:highlight]:
                top.append(e[0])
            # If two top phrases are a correlated pair, widen the highlight set.
            for e in correlated_terms:
                if (e[0] in top) and (e[1] in top):
                    highlight += 1
            top = []
            for e in order_phrase[:highlight]:
                top.append(e[0])
            # Longest first so substring phrases do not clobber longer matches.
            top = sorted(top, key=len, reverse=True)
            print("top phrases: ", top)
            list_colors = ["#FF8C00", "#CD5C5C", "#32CD32", "#CD853F"]
            with div(id=summary):
                with table(
                        style=
                        "border:solid 1px; width:100%; float:left;border-collapse: collapse; margin-bottom:10px;"
                ).add(tbody()):
                    # Header row: summarizer name + source column.
                    l = tr(style="border: 1px solid black;")
                    l += td(p(
                        "Using: " + summary,
                        style=
                        "margin-top:5px; margin-bottom:5px; margin-left:15px;"
                    ),
                            style="border: solid 1px;",
                            width="70%")
                    l.add(
                        td(p(
                            "From:",
                            style=
                            "margin-top:5px; margin-bottom:5px; margin-left:15px;"
                        ),
                           style="border: solid 1px;",
                           width="30%"))
                    # One row per summary, with top phrases wrapped in colored
                    # labels and the whole text linked to its source URL.
                    for e in vals[0]:
                        l = tr(style="border: 1px solid black;")
                        print(vals[1][e])
                        url = vals[1][e][1]
                        site = vals[1][e][0]
                        e = e.lower()
                        for i in range(len(top)):
                            e = e.replace(
                                top[i],
                                '<label style="text-decoration:underline; background:'
                                + list_colors[i] + '; ">' + top[i] + '</label>')
                        # NOTE(review): str.replace(e, ...) with the whole string
                        # as the needle is effectively a reassignment wrapping
                        # the text in an anchor tag.
                        e = e.replace(e, '<a href="' + url + '">' + e + '</a>')
                        l += td(p(
                            raw(e),
                            style=
                            "margin-top:5px; margin-bottom:5px; margin-left:15px;"
                        ),
                                style="border: solid 1px;",
                                width="70%")
                        l.add(
                            td(p(site, style="margin-left:15px;"),
                               style="border: solid 1px;",
                               width="30%"))
                    # Footer row: legend of the highlighted top topics.
                    words = ''
                    for e in range(len(top)):
                        words += ' <label style="background:' + list_colors[
                            e] + ';">' + top[e] + '</label> '
                    words += ''
                    # Same whole-string replace idiom as above (reassignment).
                    words = words.replace(
                        words,
                        'Top Topics: [<label style="font-style:italic;">' +
                        words + '</label> ]')
                    l = tr(style="border: 1px solid black;")
                    l += td(p(
                        raw(words),
                        style=
                        "text-align:center; margin-top:10px; margin-bottom:10px;"
                    ),
                            colspan="2",
                            style="border: solid 1px;")
    file = open('summaries.html', 'w')
    file.write(str(doc))
    file.close()
    print("----------------------------------------------------------------")
with open(args.css) as f: _style = f.read() with doc.head: style(_style) with open(args.js) as f: plain_script = f.read() with doc: with table(id = "myTable") as _table: _tr = tr() _tr.add([ th(x[1][0], onclick='sortTable(%d, %d)' % (x[0], x[1][1])) for x in enumerate(zip(labels, dtypes)) ]) for d in data: with tr(): td(d[0]) td(d[1]) td(d[2]) td(raw('<a href=%s target="_blank">%s</a>' % (d[3], "click me") )) _script = script(raw(plain_script), type='text/javascript') print(doc.render());
with open(args.css) as css: _style = css.read() for gene in genes: ########### HTML SECTION ########### with open(os.path.join(args.outdir, "%s.html" % gene.name), 'w') as f: doc = dominate.document(title="%s: gene overview" % gene.name) with doc.head: style(_style) with doc: p(strong(gene.name)) br() with p(): b("Gene Symbol:") raw("\t%s" % gene.attrs['genesymbol']) with p(): b("CDS:") raw('<a href=%s target="_blank">\t%s(%s) %d-%d</a>' % (add_ucsc(gene, args.ucsc), gene.chrom, gene.strand, gene.start, gene.stop)) #raw("\t%s(%s) %d-%d " % (gene.chrom, gene.strand, gene.start, gene.stop)) with p(): b("Alternative TSS:") raw("\t%s" % gene.attrs["alt_tss"].replace(",", ", ")) with p(): b("Annotation:") raw("\t%s" % gene.attrs['annotation']) with p(): b("Function:") raw("\t%s" % gene.attrs.get('function',
with doc.head: style(_style) with doc: p(strong("HrrA All-In table")) input(type="text", id="myInput", onkeyup="my_search(4)", placeholder="Search for a genesymbol..") #input(type="number", id="myInputGreater", onkeyup="my_filter_greater(6)", placeholder="Filter AT content greater than..") with table(id = "myTable") as _table: _tr = tr() _tr.add([ th(x[1][0], onclick='sortTable(%d, %d)' % (x[0], x[1][1])) for x in enumerate(zip(headers, dtypes)) ]) for a in all_in_table: with tr(): #td(raw('<a href=%s target="_blank">ucsc_link</a>' % add_ucsc(interval, args.ucsc)) ) td(raw('<a href=%s target="_blank">ucsc_link</a>' % add_ucsc(a[1], a[2], args.ucsc)) ) for el in a[1:]: td(ndict.get(el, el)) _script = script(type='text/javascript') _script.add_raw_string(plain_script) print(doc.render());
def main():
    """Convert a Trello board JSON export (argv[1]) to HTML (argv[2]).

    Reads the exported board, indexes its labels/members/lists/checklists,
    groups comments per card, and writes a single static HTML document
    rendering members, lists, and every card with its details.
    """
    with open(sys.argv[1], encoding='utf-8') as f:
        board = json.load(f)

    # Build indexes into the JSON structure.
    labels = {lab['id']: lab for lab in board['labels']}
    members = {mem['id']: mem for mem in board['members']}
    lists = {lis['id']: lis for lis in board['lists']}
    checklists = {check['id']: check for check in board['checklists']}
    # (removed unused `cards` and `actions` id-indexes)

    # Grouped collections of things.
    cards_by_list = defaultdict(list)
    for card in board['cards']:
        # BUG FIX: grouped by card['id'] before, which put every card in its
        # own singleton group; the list id is the correct grouping key.
        cards_by_list[card['idList']].append(card)

    comments_by_card = defaultdict(list)
    for com in (c for c in board['actions'] if c['type'] == 'commentCard'):
        comments_by_card[com['data']['card']['id']].append(com)

    # Markdown renderer (reset() before each convert to clear parser state).
    md = Markdown()

    title = "Exported Trello Board: " + board['name']
    doc = dominate.document(title=title)
    with doc:
        h1(title)
        hr()
        with div(id="members"):
            h2("Trello Board Members")
            with ul():
                for m in board['members']:
                    li("{} (@{})".format(m['fullName'], m['username']))
        hr()
        with div(id="lists"):
            h2("Trello Lists")
            with ul():
                for l in board['lists']:
                    li("{}".format(l['name']))
        hr()
        with div(id="cards"):
            h2("Trello Cards")
            for c in board['cards']:
                with div(cls="card"):
                    h3("Card Title: {}".format(c['name']))
                    # BUG FIX: was dl(classmethod=...) — a typo; `cls` is the
                    # keyword dominate maps to the HTML class attribute.
                    with dl(cls="carddetails"):
                        dt("Last Activity:")
                        dd(c['dateLastActivity'])
                        dt("Trello List:")
                        dd(lists[c['idList']]['name'])
                        if c['due']:
                            dt("Due Date:")
                            dd(c['due'])
                    if c['desc']:
                        h4("Card Description:")
                        with div(cls="carddesc"):
                            div(raw((md.reset().convert(c['desc']))))
                    if c['idMembers']:
                        with div(cls="cardmembers"):
                            h4("Card Members:")
                            with ul():
                                for m in c['idMembers']:
                                    li(members[m]['fullName'])
                    if c['idLabels']:
                        with div(cls="cardlabels"):
                            h4("Card Labels:")
                            with ul():
                                for l in c['idLabels']:
                                    li(labels[l]['name'])
                    for cid in c['idChecklists']:
                        clist = checklists[cid]
                        with div(cls="checklist"):
                            h4("Checklist: {}".format(clist['name']))
                            with ul():
                                for item in clist['checkItems']:
                                    li("{} ({})".format(item['name'], item['state']))
                    if comments_by_card[c['id']]:
                        with div(cls="comments"):
                            h4("Card Comments:")
                            for com in comments_by_card[c['id']]:
                                with div(cls='cardcomment'):
                                    full = com['memberCreator']['fullName']
                                    user = com['memberCreator']['username']
                                    date = com['date']
                                    tup = (full, user, date)
                                    h4("{} ({}) : {}".format(*tup))
                                    md.reset()
                                    raw((md.convert(com['data']['text'])))
                    # Line under each card.
                    hr()

    with open(sys.argv[2], 'w', encoding='utf-8') as outfile:
        outfile.write(str(doc))
# Start creating HTML output html_output = html() with html_output: with head(): meta(charset='utf-8') meta(name='viewport', content='width=device-width, initial-scale=1, shrink-to-fit=no') # api.title title(get(json_api, 'api.title')) # css link(rel='stylesheet', type='text/css', href=style if style else bootstrap_css) for s in external_styles: link(type='text/css', href=s) raw('\n<style>' + css_style + '</style>\n') # api with body(): with div(_class='container'): # api.title h1(raw(get(json_api, 'api.title')), _class='display-4 mt-3') index_root = div(_class=card) index_body = index_root.add(div(_class='card-body')) with index_body: # api.description p(raw(get_multiline(json_api, 'api.description')), _class='card-text lead') # api.host p(b('Endpoint:'), code (raw(' '), get (json_api, 'api.host')),