def report_meta_info(report_file, version, hostname, working_directory, command_line_text):
    '''Write a <div> of metadata about this annokey search to report_file:
    the annokey version, a link to the annokey homepage, the hostname,
    the working directory, the command line used, and today's date.
    '''
    meta_div = html.HTML('div')
    entries = meta_div.ul
    # Each metadata entry is a list item: a bold label followed by its value.
    entry = entries.li
    entry.b("annokey version: ")
    entry.text(version)
    entry = entries.li
    entry.b("annokey homepage: ")
    entry.a(ANNOKEY_URL, href=ANNOKEY_URL)
    entry = entries.li
    entry.b("hostname: ")
    entry.text(hostname)
    entry = entries.li
    entry.b("directory: ")
    entry.text(working_directory)
    entry = entries.li
    entry.b("command: ")
    entry.text(command_line_text)
    entry = entries.li
    entry.b("date: ")
    entry.text(datetime.date.today().strftime('%d %b %Y'))
    report_file.write(str(meta_div))
def inline_maps(map_list):
    """Embed the HTML source of every map in map_list directly into the
    IPython notebook.

    This will not work if a map depends on external files (json data).
    It relies on the HTML5 ``srcdoc`` iframe attribute, which may not be
    supported in all browsers.

    map_list: 2-D array of maps; dimensions should be [nRows][nCols].
    """
    nRows = len(map_list)
    # nCols = max([len(row) for row in map_list])
    hb = hgen.HTML()
    t = hb.table(width="100%")
    for r in range(nRows):
        row = t.tr
        for c in range(len(map_list[r])):
            currMap = map_list[r][c]
            currMap._build_map()
            row.td(
                '<iframe srcdoc="{srcdoc}" style="width: 100%; height: 510px; border: none"></iframe>'
                # BUG FIX: double quotes inside the srcdoc attribute value must
                # be escaped as &quot;; the old replace('"', '"') was a no-op
                # and produced broken attribute markup.
                .format(srcdoc=currMap.HTML.replace('"', '&quot;')))
    return idisp.HTML(
        '<iframe srcdoc="{srcdoc}" style="width: 100%; height: {ht}px; border: none"></iframe>'
        .format(srcdoc=str(t).replace('"', '&quot;'), ht=510 * nRows))
def threads_ranking(self, rank=5, resolution=None):
    """Render the top `rank` threads as an HTML table string.

    rank       -- number of threads to include.
    resolution -- accepted for interface compatibility; unused here.
    """
    data = self.query.threads_ranking(rank=rank)
    h = html.HTML()
    t = h.table()
    # Header row.
    r = t.tr
    r.td('date', klass='td_date_t')
    r.td('from', klass='td_from_t')
    r.td('replies', klass='td_rep_t')
    r.td('subject', klass='td_subject_t')
    # One row per thread; the subject cell links to the thread URL.
    # BUG FIX: removed stray debug statement ``print(row.index)`` that wrote
    # to stdout on every rendered row.
    for _, row in data.iterrows():
        r = t.tr
        r.td(str(row['date']), klass='td_date')
        r.td(row['from'], klass='td_from')
        r.td(str(row['nbr-references']), klass='td_rep')
        r.td('', klass='td_subject').text(str(
            h.a(row['subject'], href=row['url'])), escape=False)
    return str(t)
def vis_files_in_folder():
    """Build an HTML page visualising every image under the vis_models folder."""
    folder = '/home/ysheng/Documents/vis_models'
    webpage = html.HTML(folder, 'models', reflesh=1)
    img_folders = join(folder, 'imgs')
    files = get_files(img_folders)
    print("There are {} files".format(len(files)))
    # Collect the distinct prefixes: file name minus its 3-digit suffix.
    prefix_set = set()
    for cur_file in tqdm(files):
        stem = os.path.splitext(os.path.basename(cur_file))[0]
        prefix_set.add(stem[:-3])
    print('there are {} prefixs'.format(len(prefix_set)))
    prefixes = sorted(prefix_set)
    relative_folder = './imgs'
    # Every prefix owns an equal share of the files.
    per_prefix = len(files) // len(prefixes)
    for prefix in prefixes:
        ims = [join(relative_folder, prefix + '{:03d}.png'.format(k))
               for k in range(per_prefix)]
        txts = [prefix + '{:03d}'.format(k) for k in range(per_prefix)]
        webpage.add_images(ims, txts, ims)
    webpage.save()
    print('finished')
def list_to_html(iterable, enclose=False):
    """Render every element of iterable as an <li> item.

    When enclose is True the items are placed inside a <ul>; otherwise
    they are appended directly to the document root.
    """
    h = html.HTML()
    container = h.ul if enclose else h
    for element in iterable:
        item = container.li()
        item.raw_text(to_html(element))
    return h
def to_html(obj):
    """Convert obj to an html.HTML fragment, dispatching on its type."""
    h = html.HTML()
    if isinstance(obj, list):
        return list_to_html(obj)
    if isinstance(obj, dict):
        return dict_to_html(obj)
    # Anything else is rendered as a paragraph of its string form.
    paragraph = h.p('')
    paragraph.text(str(obj))
    return h
def report_head(report_file):
    '''Generate the <head> of the output page and write it to report_file.'''
    head = html.HTML('head')
    head.meta(charset="UTF-8")
    head.title('Annokey Search Report')
    # Inline stylesheet and script come from module-level constants.
    head.style(STYLE_CSS, type="text/css")
    head.script(JAVASCRIPT)
    report_file.write(HEAD_TEMPLATE.format(str(head)))
def create_html_indexes(args):
    """Create HTML indexes.

    The index.html file will be created within all folders of the
    `repo_dir` element of the *args* dict.

    :param args: Parsed arguments in dictionary format.
    :type args: ``dict``
    """
    full_path = utils.get_abs_path(file_name=args['repo_dir'])
    excludes = [utils.get_abs_path(file_name=i) for i in args['dir_exclude']]
    for fpath, afolders, afiles in os.walk(full_path):
        # Skip excluded directories.
        if [i for i in excludes if fpath.startswith(i)]:
            continue
        else:
            LOG.debug('Path Found: "%s"', fpath)
            _title = 'links for "%s"' % os.path.basename(fpath)
            index = html.HTML('html')
            head = index.head()
            head.title(_title)
            body = index.body(newlines=True)
            body.h1(_title)
            # Enter the directory so os.path.relpath produces links relative
            # to the index file being written.
            with utils.ChangeDir(fpath):
                LOG.debug('Folders Found: "%d"', len(afolders))
                for afolder in sorted(afolders):
                    full_folder_path = os.path.join(fpath, afolder)
                    body.a(
                        os.path.basename(full_folder_path),
                        href=os.path.relpath(full_folder_path),
                        rel="internal"
                    )
                    body.br()
                LOG.debug('Files Found: "%d"', len(afiles))
                for afile in sorted(afiles):
                    # Never index the index file itself.
                    if afile == 'index.html':
                        continue
                    full_file_path = os.path.join(fpath, afile)
                    body.a(
                        os.path.basename(full_file_path).split('#')[0],
                        href=os.path.relpath(full_file_path),
                        rel="internal",
                        md='md5:%s' % return_hash(full_file_path)
                    )
                    body.br()
                # NOTE(review): this is a for/else clause -- the loop above has
                # no ``break``, so the else body always runs once after the
                # loop finishes; a plain unindented block would behave the same.
                else:
                    index_file = os.path.join(fpath, 'index.html')
                    # NOTE(review): writing str() output to a file opened in
                    # 'wb' is a Python-2 idiom; on Python 3 this would need
                    # bytes (or mode 'w') -- confirm the target interpreter.
                    with open(index_file, 'wb') as f:
                        f.write(str(index))
                    LOG.info('Index file [ %s ] created.', index_file)
def html_page(title=None, data=None):
    """Return a complete HTML page as a string.

    title -- optional page title.
    data  -- optional body content, converted via to_html(); an empty
             body is emitted when absent.
    """
    page = html.HTML()
    header = page.head
    if title:
        header.title(title)
    body = page.body
    if not data:
        body.text('')
    else:
        body.raw_text(to_html(data))
    return str(page)
def shape_to_placemark(doc, shape, id_str, name=None, record=None, fields=None):
    """Convert a shapefile shape into a KML <Placemark> DOM element.

    doc    -- xml.dom Document used as the element factory.
    shape  -- shape object with a ``points`` list of (x, y) pairs.
    id_str -- value for the placemark's ``id`` attribute.
    name   -- optional placemark display name.
    record -- optional attribute record, rendered as an HTML table in the
              placemark description.  When given, ``fields`` must be the
              matching field descriptors (the two are zip()-ed together).
    Returns the new Placemark element (not yet attached to a parent).
    """
    # Create the placemark element
    placemark = doc.createElement('Placemark')
    placemark.setAttribute('id', id_str)
    if name is not None:
        name_elem = doc.createElement('name')
        name_elem.appendChild(doc.createTextNode(name))
        placemark.appendChild(name_elem)
    # Create a description of the element as a html document
    desc = html.HTML()
    # Do we have records?
    if record is not None:
        table = desc.table()
        th = table.thead()
        tr = th.tr()
        tr.td('Field')
        tr.td('Value')
        tb = table.tbody()
        for k, v in zip(fields, record):
            tr = tb.tr()
            # k[0] is the field name; the rest of the descriptor is ignored.
            tr.td(str(k[0]))
            tr.td(str(v))
    # Append the HTML description to the placemark
    description = doc.createElement('description')
    description.appendChild(doc.createTextNode(str(desc)))
    placemark.appendChild(description)
    # Project all of the co-ordinates.  shp_proj/kml_proj are presumably
    # module-level pyproj projections (shapefile CRS -> KML lat/long) --
    # confirm against the module header.
    lngs, lats = pyproj.transform(shp_proj, kml_proj,
                                  [p[0] for p in shape.points],
                                  [p[1] for p in shape.points])
    # Create a line string for the outline
    ls = doc.createElement('LineString')
    placemark.appendChild(ls)
    # Create a co-ordinates element with all these points in it.
    # KML coordinates are lon,lat,altitude triples; altitude is fixed at 0.
    coords = doc.createElement('coordinates')
    for lng, lat in zip(lngs, lats):
        coords.appendChild(doc.createTextNode('%f,%f,0' % (lng, lat)))
    ls.appendChild(coords)
    return placemark
def index(self, appid=None):
    """Return the index page.

    Python 2 code (print statements, iterator .next()).  Lists the
    version directories found under cvmfs_root/<appid> as <option>
    elements, newest first, or an empty string for an unknown appid.
    """
    print "IN CVMFSAppVersion: appid=(%s)" % appid
    if appid not in self.valid_apps:
        print "Invalid app type %s" % appid
        return ''
    html_ = html.HTML()
    # First walk() result gives the immediate subdirectories only.
    _, dirs, _ = os.walk(os.path.join(self.cvmfs_root, appid)).next()
    for dir_ in natsorted(dirs, reverse=True):
        # Only directories whose names match the version pattern are listed.
        for version in VERSION_RE.findall(dir_):
            html_.option(version)
    return str(html_)
def foo(iterable, h=None):
    """Append an element to ``h`` for every (tag, data) pair in iterable.

    'list' and 'table' tags dispatch to the specialised converters; any
    other tag creates an element named after the tag whose content is the
    string form of the data.

    h -- target html.HTML document; a fresh one is created when omitted.
    """
    # BUG FIX: the default used to be ``h=html.HTML()`` -- a mutable default
    # shared (and appended to) across every call to this function.
    if h is None:
        h = html.HTML()
    for tag, data in iterable:
        if tag == 'list':
            l = h.ul
            l.raw_text(list_to_html(data))
        elif tag == 'table':
            t = h.table
            t.raw_text(table_to_html(data))
        else:
            e = getattr(h, tag)
            # BUG FIX: previously referenced the undefined name ``v``
            # (NameError); the loop value is ``data``.
            e.raw_text(str(data))
def table_to_html(table, enclose=False):
    """Render a list of lists (rows of columns) as HTML table rows.

    The returned markup is not wrapped in <table></table> tags unless
    enclose is True.
    """
    h = html.HTML()
    container = h.table if enclose else h
    for row in table:
        row_tag = container.tr
        for col in row:
            cell = row_tag.td
            cell.raw_text(to_html(col))
    return h
def __init__(self, tpref="", sil_flag=False):
    """Initialise empty bookkeeping structures for a scan run."""
    self.targets = {}
    self.starttime = None
    self.endtime = None
    # Dictionaries keyed by target type recording what worked and what
    # failed during the run.
    self.successes = {}
    self.failures = {}
    self.html = html.HTML(sil_flag)
    self.images = {}
    self.os_stats = {}
    self.tpref = tpref
    self.sil_flag = sil_flag
    self.extras = False
    self.pofhosts = []
def dict_to_html(dictionary):
    '''Render each (key, value) pair of *dictionary* as an HTML element
    whose tag name is the key.  For example::

        from collections import OrderedDict
        d = OrderedDict()
        d['h1'] = 'Hola'
        d['p'] = 'mundo'
        str(dict_to_html(d))  # <h1>hola</h1>\n<p>mundo</p>

    With a plain (pre-3.7) dict the output order of the values cannot be
    guaranteed; pass an OrderedDict to control it.
    '''
    h = html.HTML()
    # BUG FIX: ``iteritems()`` is Python-2 only; ``items()`` works on both
    # Python 2 and 3 with identical iteration behaviour here.
    for tag, value in dictionary.items():
        # getattr is the idiomatic spelling of h.__getattr__(tag).
        element = getattr(h, tag)
        element.raw_text(str(value))
    return h
def __init__(self, path=None, **kwargs):
    """Build the XHTML page frame for this page object.

    path   -- optional template file used as the page body; when omitted a
              placeholder ``body`` value slot is used instead.
    kwargs -- forwarded to the BODY element as HTML attributes.
    """
    if path:
        body = template.File(False, path)
    else:
        body = html.Value("body")
    # Content-Type meta tag; ``http-equiv`` contains a hyphen so it cannot
    # be passed as a keyword argument and is set via item assignment.
    meta = html.META(content="text/html; charset=utf-8")
    meta["http-equiv"] = "Content-Type"
    htmlBody = html.BODY(body, **kwargs)
    # bodyClass is expected to be defined on the class (subclass hook) --
    # not visible in this chunk; confirm on the class definition.
    if self.bodyClass:
        htmlBody["class"] = self.bodyClass
    # Assemble: doctype string, then <html> containing a head (charset meta,
    # ``header`` slot, ``pageTitle`` slot) and the body built above.
    self.frame = html.Group(
        html.RawStr(self.dtd),
        html.HTML(
            html.HEAD(meta, html.Value("header"),
                      html.TITLE(html.Value("pageTitle"))),
            htmlBody,
            xmlns="http://www.w3.org/1999/xhtml",
        ))
def GET(self, reqid=None):  # pylint: disable=C0103
    """REST Get method.

    Python 2 code (print statement, .iteritems()).  Without a reqid,
    returns a JSON listing of requests (all requests for admins, own
    requests otherwise); with a reqid, returns that request rendered as a
    two-column HTML table.
    """
    print "IN GET: reqid=(%s)" % reqid
    requester = cherrypy.request.verified_user
    with db_session(self.dburl) as session:
        if reqid is None:
            # No id given: list requests.  Non-admins only see their own.
            if not requester.admin:
                query = session.query(Requests.id, Requests.request_date,
                                      Requests.sim_lead, Requests.status,
                                      Requests.description)\
                               .filter(Requests.requester_id == requester.id)
                return json.dumps({
                    'data': [
                        dict(zip(COLUMNS, request))
                        for request in query.all()
                    ]
                })
            # Admin view: join Users so the requester name can be resolved
            # from the certificate DN.
            query = session.query(Requests.id, Requests.request_date,
                                  Requests.sim_lead, Requests.status,
                                  Requests.description, Users.dn)\
                           .join(Users, Requests.requester_id == Users.id)
            data = []
            for request in query.all():
                tmp = dict(zip(COLUMNS, request))
                tmp['requester'] = name_from_dn(request.dn)
                data.append(tmp)
            return json.dumps({'data': data})
        # Specific id: render the single request as an HTML table; an
        # unknown id yields an empty table rather than an error.
        table = html.HTML().table(border='1')
        request = session.query(Requests).filter(
            Requests.id == reqid).first()
        if request is not None:
            for colname, value in request.iteritems():
                row = table.tr()
                row.td(colname)
                row.td(str(value))
        return str(table)
def display_current_results(self, visuals, epoch, iter=0):
    """Show the current visuals in visdom and/or save them to the HTML page.

    visuals -- mapping of label -> array; labels containing 'pc' are shown
               as point-cloud scatter plots, labels containing 'img' as
               images.
    epoch   -- current epoch number (used in saved file names).
    iter    -- current iteration number.  NOTE(review): shadows the builtin
               ``iter``; renaming would break keyword callers, so left as-is.
    """
    if self.display_id > 0:  # show images in the browser
        idx = 1
        for label, item in visuals.items():
            if 'pc' in label:
                self.vis.scatter(np.transpose(item), Y=None,
                                 opts=dict(title=label, markersize=0.5),
                                 win=self.display_id + idx)
            elif 'img' in label:
                # the transpose: HxWxC -> CxHxW
                self.vis.image(np.transpose(item, (2, 0, 1)),
                               opts=dict(title=label),
                               win=self.display_id + idx)
            idx += 1
    if self.use_html:  # save images to a html file
        for label, image_numpy in visuals.items():
            img_path = os.path.join(
                self.img_dir,
                'epoch%.3d-%d_%s.png' % (epoch, iter, label))
            util.save_image(image_numpy, img_path)
        # update website: one header + image strip per past epoch, newest first.
        webpage = html.HTML(self.web_dir,
                            'Experiment name = %s' % self.name, reflesh=1)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            ims = []
            txts = []
            links = []
            for label, image_numpy in visuals.items():
                # NOTE(review): the page for epoch n links files named with
                # the *current* iter value -- confirm earlier iters are not
                # expected here.
                img_path = 'epoch%.3d-%d_%s.png' % (n, iter, label)
                ims.append(img_path)
                txts.append(label)
                links.append(img_path)
            webpage.add_images(ims, txts, links, width=self.win_size)
        webpage.save()
def display_current_results(self, visuals, epoch):
    """Save the current visuals to disk and regenerate the results web page."""
    # Write each visual out under its epoch-stamped file name.
    for label, image_numpy in visuals.items():
        img_path = os.path.join(
            self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
        util.save_image(image_numpy, img_path)
    # Rebuild the web page, most recent epoch first.
    webpage = html.HTML(
        self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
    for past_epoch in range(epoch, 0, -1):
        webpage.add_header('epoch [%d]' % past_epoch)
        ims, txts, links = [], [], []
        for label, image_numpy in visuals.items():
            img_path = 'epoch%.3d_%s.png' % (past_epoch, label)
            ims.append(img_path)
            txts.append(label)
            links.append(img_path)
        webpage.add_images(ims, txts, links, width=self.win_size)
    webpage.save()
def report_hits(report_file, gene_count, gene_name, gene_db_id, hits):
    '''Generate the HTML for all the search hits of one gene and write it
    to report_file.  Genes with no hits produce no output.
    '''
    if not hits:
        return
    gene_div = html.HTML('div')
    gene_div.h2(gene_name)
    # External reference links for this gene.
    links = gene_div.ul
    entry = links.li
    entry.a("NCBI Gene", href=NCBI_GENE_ENTRY_URL + gene_db_id)
    entry = links.li
    entry.a("GeneRIF", href=GENE_RIF_URL + gene_db_id)
    entry = links.li
    entry.a("Pubmed", href=PUBMED_URL + gene_db_id)
    sorted_hits = sorted(hits)
    make_hit_table(gene_div, hits, sorted_hits)
    make_detailed_match_lists(gene_div, gene_count, hits, sorted_hits,
                              gene_name)
    report_file.write(str(gene_div))
def vis_files(df_file):
    """Build a benchmark web page from a CSV dataframe.

    Each dataframe row holds image paths in its first half and display
    names in its second half: path, path, ..., name, name, ...
    """
    webpage = html.HTML('.', 'benchmark', reflesh=1)
    relative_folder = './imgs'
    df = pd.read_csv(df_file)
    for _, row in tqdm(df.iterrows(), total=len(df)):
        # Column 0 is the index; paths occupy the first half of the rest.
        half = len(row) // 2 + 1
        imgs = [join(relative_folder, row[k]) for k in range(1, half)]
        txts = [row[k] for k in range(half, len(row))]
        webpage.add_images(imgs, txts, imgs)
    webpage.save()
    print('finished')
def from_dataframe(data_frame, table_name=None, name_map={}, url_map={}):
    """Render a pandas DataFrame as an HTML table string.

    name_map -- optional mapping of column/index names to display names.
    url_map  -- optional mapping of column name -> name of the column that
                holds its URL; the URL column is dropped from the output
                and its values are used as hyperlinks for the mapped column.

    NOTE(review): the mutable default arguments are never mutated here so
    they are harmless, but they are a well-known Python pitfall.
    """
    # Build the header: index name first, then columns, both display-mapped.
    header = []
    if data_frame.index.name in name_map:
        header.append(name_map[data_frame.index.name])
    else:
        header.append(data_frame.index.name)
    for h in data_frame.columns:
        if h in name_map:
            h = name_map[h]
        header.append(h)
    # Per-column CSS classes: td_<name>_t for headers, td_<name> for cells.
    css_header = []
    css_element = []
    for i in header:
        css_header.append('td_' + i + '_t')
        css_element.append('td_' + i)
    h = html.HTML()
    if table_name:
        t = h.table(id=table_name, klass=table_name + '_t')
    else:
        t = h.table()
    # url map: record which header index supplies URLs for which column and
    # drop the URL column itself from the header.
    url_hash = {}
    url_skip = []
    url_keys = url_map.keys()
    for u in url_keys:
        if u in header and url_map[u] in header:
            url_indx = header.index(url_map[u])
            url_hash[header.index(u)] = url_indx
            url_skip.append(url_indx)
            header.pop(url_indx)
    #header
    r = t.tr
    n = 0
    for j in header:
        r.td(str(j), klass=css_header[n])
        n += 1
    #elements
    for k, row in data_frame.iterrows():
        r = t.tr
        # Index value occupies the first cell.
        r.td(str(k), klass=css_element[0])
        n = 1
        for l in row:
            if n in url_skip:
                # NOTE(review): skipping without ``n += 1`` means every later
                # column in this row reuses the skipped css/url index --
                # confirm this is intentional.
                continue
            if isinstance(l, float):
                # Show 4 decimals, but collapse whole floats to ints.
                if l % 1 > 0:
                    l = '{0:.4f}'.format(l)
                else:
                    l = int(l)
            if n in url_hash.keys():
                # NOTE(review): row[...] positions exclude the index column,
                # hence the -1 offset relative to the header index -- verify.
                url = row[url_hash[n] - 1]
                r.td('', klass=css_element[n]).text(
                    str(h.a(str(l), href=url)), escape=False)
            else:
                r.td(str(l), klass=css_element[n])
            n += 1
    return str(t)
def display_current_results(self, visuals, epoch, save_result):
    """Display current results in visdom; optionally save them to HTML.

    visuals     -- OrderedDict of label -> image tensor.
    epoch       -- current epoch number (used in file names/page headers).
    save_result -- if True, force-save the current results to the HTML page.
    """
    if self.display_id > 0:  # show images in the browser
        ncols = self.ncols
        if ncols > 0:
            # Single visdom panel: images packed in a grid plus an HTML
            # table of labels laid out to match.
            ncols = min(ncols, len(visuals))
            h, w = next(iter(visuals.values())).shape[:2]
            table_css = """<style>
table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}
table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h)
            title = self.name
            label_html = ''
            label_html_row = ''
            images = []
            idx = 0
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                label_html_row += '<td>%s</td>' % label
                images.append(image_numpy.transpose([2, 0, 1]))
                idx += 1
                if idx % ncols == 0:
                    label_html += '<tr>%s</tr>' % label_html_row
                    label_html_row = ''
            # Pad the final row with white images / empty label cells.
            white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
            while idx % ncols != 0:
                images.append(white_image)
                label_html_row += '<td></td>'
                idx += 1
            if label_html_row != '':
                label_html += '<tr>%s</tr>' % label_html_row
            # pane col = image row
            try:
                self.vis.images(images, nrow=ncols, win=self.display_id + 1,
                                padding=2, opts=dict(title=title + ' images'))
                label_html = '<table>%s</table>' % label_html
                self.vis.text(table_css + label_html,
                              win=self.display_id + 2,
                              opts=dict(title=title + ' labels'))
            except ConnectionError:
                self.throw_visdom_connection_error()
        else:
            # One visdom panel per image.
            idx = 1
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                self.vis.image(image_numpy.transpose([2, 0, 1]),
                               opts=dict(title=label),
                               win=self.display_id + idx)
                idx += 1
    if self.use_html and (save_result or not self.saved):
        # save images to a html file
        self.saved = True
        for label, image in visuals.items():
            image_numpy = util.tensor2im(image)
            img_path = os.path.join(self.img_dir,
                                    'epoch%.3d_%s.png' % (epoch, label))
            util.save_image(image_numpy, img_path)
        # update website
        webpage = html.HTML(self.web_dir,
                            'Experiment name = %s' % self.name, reflesh=1)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            ims, txts, links = [], [], []
            for label, image_numpy in visuals.items():
                # NOTE(review): ``util.tensor2im(image)`` reuses the stale
                # ``image`` from the loops above and its result is unused --
                # looks like leftover code; only the path strings matter here.
                image_numpy = util.tensor2im(image)
                img_path = 'epoch%.3d_%s.png' % (n, label)
                ims.append(img_path)
                txts.append(label)
                links.append(img_path)
            webpage.add_images(ims, txts, links, width=self.win_size)
        webpage.save()
def main():
    """Summarise every project's file/event counts and disk usage as an
    HTML table written to harvard_projects_summary.html.

    Python 2 code (print statement).
    """
    # Get the list of projects, number of files (ana and non-ana), number of
    # events (ana and non-ana), and disk usage, and parents
    dataset_reader = DatasetReader()
    project_reader = ProjectReader()
    projects = project_reader.list_datasets()
    # projects = ("test1", "test2")
    h = html.HTML()
    table = h.table(border='1')
    header = table.tr
    header.th("Project")
    header.th("ID")
    header.th("File Count")
    header.th("File Count (Ana)")
    header.th("Event Count")
    header.th("Event Count (Ana)")
    header.th("Disk Usage")
    header.th("Disk Usage (Ana)")
    header.th("Parents")
    # Running totals accumulated over all projects.
    total_file_count = 0
    total_file_count_ana = 0
    total_event_count = 0
    total_event_count_ana = 0
    total_disk_usage = 0
    total_disk_usage_ana = 0
    for project in projects:
        # list_datasets() rows are sequences; element 0 is the name.
        project = project[0]
        print project
        row = table.tr
        row.td("{0}".format(project))
        project_id = project_reader.dataset_ids(project)
        row.td("{0}".format(project_id))
        # type=0 -> regular files, type=1 -> analysis ("ana") files;
        # None results are displayed but excluded from the totals.
        file_count = dataset_reader.count_files(dataset=project, type=0)
        row.td("{0}".format(file_count))
        if file_count is not None:
            total_file_count += file_count
        file_count_ana = dataset_reader.count_files(dataset=project, type=1)
        row.td("{0}".format(file_count_ana))
        if file_count_ana is not None:
            total_file_count_ana += file_count_ana
        event_count = dataset_reader.sum(dataset=project,
                                         target='nevents', type=0)
        row.td("{0}".format(event_count))
        if event_count is not None:
            total_event_count += event_count
        event_count_ana = dataset_reader.sum(dataset=project,
                                             target='nevents', type=1)
        row.td("{0}".format(event_count_ana))
        if event_count_ana is not None:
            total_event_count_ana += event_count_ana
        disk_usage = dataset_reader.sum(dataset=project,
                                        target='size', type=0)
        row.td("{0}".format(bytes_2_human_readable(disk_usage)))
        if disk_usage is not None:
            total_disk_usage += disk_usage
        disk_usage_ana = dataset_reader.sum(dataset=project,
                                            target='size', type=1)
        row.td("{0}".format(bytes_2_human_readable(disk_usage_ana)))
        if disk_usage_ana is not None:
            total_disk_usage_ana += disk_usage_ana
        parents = project_reader.direct_parents(dataset_id=project_id)
        row.td("{0}".format(parents))
    # Bold totals row at the bottom of the table.
    row = table.tr(style="font-weight:bold")
    row.td("Total:")
    row.td("-")
    row.td("{0}".format(total_file_count))
    row.td("{0}".format(total_file_count_ana))
    row.td("{0}".format(total_event_count))
    row.td("{0}".format(total_event_count_ana))
    row.td("{0}".format(bytes_2_human_readable(total_disk_usage)))
    row.td("{0}".format(bytes_2_human_readable(total_disk_usage_ana)))
    row.td("-")
    with open("harvard_projects_summary.html", "w") as html_file:
        html_file.write(str(h))
def CreateHtmlPageAndBody():
    """Return a new HTML page element together with its body element."""
    page = html.HTML('html')
    # escape=False keeps the charset meta tag as raw markup.
    page.head('<meta charset="UTF-8">', escape=False)
    return page, page.body()
def display_current_results(self, visuals, epoch, save_result):
    """Display current results on visdom; save current results to an HTML file.

    Parameters:
        visuals (OrderedDict) - - dictionary of images to display or save
        epoch (int) - - the current epoch
        save_result (bool) - - if save the current results to an HTML file
    """
    if self.display_id > 0:  # show images in the browser using visdom
        ncols = self.ncols
        if ncols > 0:  # show all the images in one visdom panel
            ncols = min(ncols, len(visuals))
            h, w = next(iter(visuals.values())).shape[:2]
            # NOTE(review): '% d' (with a space) is the space-flag format
            # spec and renders e.g. "width:  256px"; '%d' was probably
            # intended -- confirm before changing the emitted CSS.
            table_css = """<style>
        table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
        table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
        </style>""" % (w, h)  # create a table css
            # create a table of images.
            title = self.name
            label_html = ''
            label_html_row = ''
            images = []
            idx = 0
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                label_html_row += '<td>%s</td>' % label
                images.append(image_numpy.transpose([2, 0, 1]))
                idx += 1
                if idx % ncols == 0:
                    label_html += '<tr>%s</tr>' % label_html_row
                    label_html_row = ''
            # Pad the final row with white images / empty label cells.
            white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
            while idx % ncols != 0:
                images.append(white_image)
                label_html_row += '<td></td>'
                idx += 1
            if label_html_row != '':
                label_html += '<tr>%s</tr>' % label_html_row
            try:
                self.vis.images(images, nrow=ncols, win=self.display_id + 1,
                                padding=2, opts=dict(title=title + ' images'))
                label_html = '<table>%s</table>' % label_html
                self.vis.text(table_css + label_html,
                              win=self.display_id + 2,
                              opts=dict(title=title + ' labels'))
            except VisdomExceptionBase:
                self.create_visdom_connections()
        else:  # show each image in a separate visdom panel;
            idx = 1
            try:
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    self.vis.image(image_numpy.transpose([2, 0, 1]),
                                   opts=dict(title=label),
                                   win=self.display_id + idx)
                    idx += 1
            except VisdomExceptionBase:
                self.create_visdom_connections()
    if self.use_html and (save_result or not self.saved):
        # save images to an HTML file if they haven't been saved.
        self.saved = True
        # save images to the disk
        for label, image in visuals.items():
            image_numpy = util.tensor2im(image)
            img_path = os.path.join(self.img_dir,
                                    'epoch%.3d_%s.png' % (epoch, label))
            util.save_image(image_numpy, img_path)
        # update website
        webpage = html.HTML(self.web_dir,
                            'Experiment name = %s' % self.name, refresh=1)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            ims, txts, links = [], [], []
            for label, image_numpy in visuals.items():
                # NOTE(review): tensor2im(image) uses the stale ``image``
                # from the loops above and its result is unused; only the
                # path strings are appended here.
                image_numpy = util.tensor2im(image)
                img_path = 'epoch%.3d_%s.png' % (n, label)
                ims.append(img_path)
                txts.append(label)
                links.append(img_path)
            webpage.add_images(ims, txts, links, width=self.win_size)
        webpage.save()
# Script fragment (cut mid-file): ``opts`` and ``config`` are defined
# earlier in the original script, and the loop body continues past this
# fragment's end.
max_iter = config['max_iter']
display_size = config['display_size']
config['vgg_model_path'] = opts.output_path
# create model
trainer = create_model(opts, config)
trainer.cuda()
# create data loaders
test_loader_a, test_loader_b = get_test_data_loaders(opts, config)
# whether continue train
trainer.load_model_dict(opts)
# create website output directory; '_sync' suffix marks synchronized runs
web_dir = os.path.join(
    opts.output_folder,
    opts.phase + '_sync' if opts.synchronized else opts.phase)
webpage = html.HTML(
    web_dir,
    'Training = %s, Phase = %s, G = %s, E = %s'
    % ('contour2shirt', opts.phase, opts.G_path, opts.E_path))
# start test: iterate both domains' loaders in lockstep
for it, (images_a, images_b) in enumerate(zip(test_loader_a,
                                              test_loader_b)):
    trainer.update_learning_rate()
    images_a, images_b = images_a.cuda(), images_b.cuda()
    trainer.set_input(images_a, images_b, config)
    encoded_output, _ = trainer.test_encoded()
    # add input domain A (contour) image to list
    # NOTE(review): the [..., 0:256] slice presumably crops a side-by-side
    # pair down to its left half -- confirm against the data loader.
    input = images_a[:, :, :, 0:256]
    all_images = [to_img(input)]
    all_names = ['input']
    # add input domain B (ground truth) image to list
    ground = images_b[:, :, :, 0:256]
    all_images.append(to_img(ground))
    # Fragment (cut mid-file): these tuples continue a list literal opened
    # earlier in the original script.
    ('', "Man's Search for Meaning by Viktor Frankl"),
    ('', 'The Lost City of Z by David Grann'),
    ('', 'The Slow Regard of Silent Things by Patrick Rothfuss'),
    ('', 'Red Rising by Pierce Brown'),
    ('', 'Golden Son by Pierce Brown'),
    ('', 'Morning Star by Pierce Brown'),
    ('', 'Iron Gold by Pierce Brown'),
    ('', "The Great War for New Zealand by Vincent O'Malley"),
    ('', 'Blood Meridian by Cormac McCarthy'),
    ('', 'The Luminaries by Eleanor Catton'),
]
summary_tables['executive_summary'] = executive_summary
# Render every summary table to HTML (Python 2: iteritems, print statement).
summary_tables_html = {}
for k, v in summary_tables.iteritems():
    h = html.HTML()
    t = h.table(border='1')
    for row in v:
        r = t.tr()
        for item in row:
            # The last cell of the facebook table is rendered as a link.
            if k == fg_facebook_name and item == row[-1]:
                r.td(h.a(item, href=item), style='padding:10px',
                     escape=False)
            else:
                r.td(item, style='padding:10px')
    summary_tables_html[k] = '{}'.format(t)

if __name__ == '__main__':
    map_filename = os.path.join(BASE_DIR, 'usa.rendered_map.html')
    print 'Writing map HTML to file: {}'.format(map_filename)
    # NOTE(review): the file handle is discarded -- open(..., 'w') only
    # truncates the file before m.save rewrites it; confirm the with-block
    # is intentional.
    with open(map_filename, 'w'):
        m.save(map_filename)
def __init__(self, path):
    # Split the target path into a page title and an output filename.
    self.htmltitle, self.filename = readpath(path)
    self.time = currenttime()
    # NOTE(review): this stores the *builtin* ``dict`` type itself, not an
    # instance -- looks like it should be ``{}`` (or a constructor
    # argument); confirm against the callers.
    self.dict = dict
    self.htmltest = html.HTML(self.htmltitle, self.filename)