def getHTMLTeamTable(self, div_age, div_gen, team_id):
    """Render the schedule for a single team as an HTML table.

    Uses the `html` package (https://pypi.python.org/pypi/html/).

    :param div_age: division age-group string
    :param div_gen: division gender string
    :param team_id: team identifier passed to get_schedule
    :return: the rendered table markup as a string
    """
    return_dict = self.get_schedule('team_id', team_id, div_age=div_age, div_gen=div_gen)
    game_list = return_dict['game_list']
    html = HTML()
    table = html.table(width='100%', border='1px solid black')
    table.caption(self.userid_name + " " + self.schedcol_name + " " +
                  div_age + div_gen + str(team_id))
    header_row = table.tr
    header_row.th('Game Date', padding='5px')
    header_row.th('Start Time', padding='5px')
    header_row.th('Field', padding='5px')
    header_row.th('Home', padding='5px')
    header_row.th('Away', padding='5px')
    for game in game_list:
        game_row = table.tr
        game_row.td(game['game_date'])
        game_row.td(game['start_time'])
        findex = self.fieldinfo_indexerGet(game['venue'])
        # BUG FIX: previously the Field cell was emitted only when the venue
        # lookup succeeded, so rows with an unknown venue had four cells and
        # the Home/Away columns shifted left. Emit an empty cell instead.
        if findex is not None:
            field_name = self.fieldinfo_list[findex]['field_name']
        else:
            field_name = ''
        game_row.td(field_name)
        game_row.td(str(game['home']))
        game_row.td(str(game['away']))
    return str(html)
def log_results(self, output_format, results):
    """Emit *results* in the requested format.

    'json'  -> pretty-print the whole mapping to the logger.
    'html'  -> render a table and write it to results.html.
    other   -> log each key/value pair, left-justified plain text.
    """
    if output_format == 'json':
        self.logger.info(pprint.pformat(results))
        return
    if output_format == 'html':
        page = HTML()
        table = page.table(border='1')
        for key, value in results.items():
            if isinstance(value, dict):
                # header row for the nested mapping, then one row per
                # inner pair with a leading empty cell as indentation
                table.tr.td(str(key))
                for inner_key, inner_value in value.items():
                    row = table.tr
                    row.td()
                    row.td(str(inner_key))
                    row.td(str(inner_value))
            else:
                row = table.tr
                row.td(key)
                row.td(str(value))
        with open('results.html', 'w') as handle:
            handle.write(str(table))
        return
    for row_key, data in results.items():
        if isinstance(data, dict):
            self.logger.info(str(row_key).ljust(50))
            for inner_key, inner_value in data.items():
                self.logger.info(str(inner_key).ljust(50) + str(inner_value))
        else:
            self.logger.info(str(row_key).ljust(50) + str(data))
def create_html_report(topwords, top_index, Pi):
    """Build and return an HTML report of the top topics in each cluster."""
    ht = HTML()
    container = ht.div(align="center")
    title = container.h2(style="text-align:center")
    # empty sub-heading element created for layout (original behavior)
    container.h3(style="text-align:center")
    # Header line with overall counts
    num_topics = len(topwords) - 1
    num_clusters = top_index.shape[0]
    title("Topics : %d \n, Cluster : %d" % (num_topics, num_clusters))
    # One heading + table per cluster
    for cluster in range(num_clusters):
        sub = container.h4(style="text-align:center")
        sub("Top Topics in Cluster %d (%.5f)" % (cluster + 1, Pi[cluster]))
        table = container.table(align="center", border="2", cellpadding="12",
                                cellspacing="0", width="80%")
        body = table.tbody()
        header = body.tr(style="text-align:center")
        header.th("Alpha")
        header.th("Top Words")
        for entry in top_index[cluster]:
            row = body.tr(style="text-align:center")
            row.td('%.5f' % entry[1])
            row.td(topwords[int(entry[0])])
    return ht
def saveResultPagePool(loadPath):
    """Render a chat log (pool, ground truth, conversation, prediction) as HTML.

    The output path is derived from *loadPath* by swapping the 'json'
    extension for 'html' and 'chatlog' for 'chatpage'.
    """
    # Columns: candidate pool, GT, dialog, prediction
    page = HTML(4)
    page.setTitle(['Pool', 'GT', 'Conversation', 'Pred'])
    savePath = loadPath.replace('json', 'html').replace('chatlog', 'chatpage')
    with open(loadPath, 'r') as fileId:
        talk = json.load(fileId)
    # No remapping configured, so cleaner is effectively the identity
    maps = {}
    cleaner = lambda x: maps[x] if x in maps else x
    for datum in talk:
        datum['pool'] = [[cleaner(jj) for jj in ii] for ii in datum['pool']]
        for field in ('gt', 'pred', 'chat'):
            datum[field] = [cleaner(ii) for ii in datum[field]]
        row = ['\n'.join([', '.join(ii) for ii in datum['pool']])]
        row.append(', '.join(datum['gt']))
        # format the (one- or two-round) conversation
        chat = 'Q1 : %3s \tA1: %s ' % (datum['chat'][0], datum['chat'][1])
        if len(datum['chat']) > 3:
            chat += '\tQ2 : %3s \t A2: %s' % (datum['chat'][2], datum['chat'][3])
        row.append(chat)
        row.append(', '.join(datum['pred']))
        page.addRow(row)
    # render and save page
    page.savePage(savePath)
def format_exception(self):
    """Collect per-frame context (file, formatted args, locals) for the
    stored exception.

    Walks the innermost ``self.no_of_tb_steps`` traceback frames of
    ``self.exception_info`` and appends a dict per frame to ``frames``.

    NOTE(review): ``frames``, ``python_version``, ``date`` and ``html_page``
    are built but never returned or stored here — presumably the rendering
    happens in code outside this view; confirm.
    """
    etype, evalue, etb = self.exception_info
    python_version = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    html_page = HTML()
    frames = []
    records = inspect.getinnerframes(etb, self.no_of_tb_steps)
    for frame, file, lnum, func_name, lines, index in records:
        file_name = ''
        if file:
            file_name = os.path.abspath(file)
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        function_args = inspect.formatargvalues(args, varargs, varkw, locals)

        # BUG FIX: the previous reader incremented a plain local in its
        # ``finally`` clause, which had no effect across calls (the default
        # was re-bound to lnum on every call), so scanvars always saw the
        # same source line. Use a one-element list as a persistent cursor,
        # the same technique the stdlib cgitb module uses.
        def reader(line_num=[lnum]):
            try:
                return linecache.getline(file, line_num[0])
            finally:
                line_num[0] += 1

        vars = scanvars(reader, frame, locals)
        # NOTE(review): this loop only advances a counter and appears to be
        # dead code (the `continue` path pins i at lnum) — kept as-is.
        if index:
            i = lnum - index
            for line in lines:
                if i == lnum:
                    continue
                i += 1
        frames.append({
            'file_name': file_name,
            'function_args': function_args,
            'locals': vars,
        })
def savePredictedTags(scores, groundTruth, dataloader, topN=20):
    """Write an HTML page listing, per image: link, GT tags, top-N predictions.

    Python 2 (xrange). Output goes to img_tags_espgame.html.
    """
    # local aliases
    gtLabels, imgIds = groundTruth['gtLabels'], groundTruth['imageId']
    # Page layout: image | ground truth | predictions
    page = HTML(3)
    page.setTitle(['Image', 'Ground Truth', 'Predicted Tags'])
    imgPath = 'val2014/COCO_val2014_%012d.jpg'
    numImgs = 100
    for ii in xrange(numImgs):
        rowContent = [page.linkImage(imgPath % imgIds[ii])]
        # ground-truth words
        rowContent.append('\n'.join(dataloader.ind2word[jj] for jj in gtLabels[ii]))
        # top-N predicted tags, highest score first
        imgScore = scores[ii, :]
        topIdx = imgScore.argsort()[-topN:][::-1]
        rowContent.append('\n'.join(dataloader.ind2word[jj] for jj in topIdx))
        page.addRow(rowContent)
    # render page and save
    page.savePage('img_tags_espgame.html')
def visualizeBatch(dataloader):
    """Render one training batch to visualize_batch.html.

    A new table row is emitted whenever the per-image feature sum changes,
    i.e. once per distinct image in the batch, showing the image link, its
    word set and the positive / negative example words. Python 2 (xrange).
    """
    # local alias
    dl = dataloader
    # get batch
    batch = dl.getTrainBatch()
    # create a html page
    page = HTML(4)
    imgPath = 'train2014/COCO_train2014_%012d.jpg'
    # Get the unique image locations via the per-image feature sums
    imgSum = batch['image'].sum(1).numpy()
    curSum = imgSum[0]
    count = 0
    for ii in xrange(imgSum.shape[0]):
        if curSum != imgSum[ii]:
            count += 1
            curSum = imgSum[ii]
            # New row: add image, set, pos, neg examples
            # NOTE(review): indentation reconstructed from a collapsed
            # source — row emission appears to happen only when a new image
            # starts; confirm against the original layout.
            row = [page.linkImage(imgPath % batch['imageId'][count])]
            setWords = [dl.ind2word[jj] for jj in list(batch['set'][ii])]
            row.append(', '.join(setWords))
            row.append(dl.ind2word[batch['pos'][ii, 0]])
            row.append(dl.ind2word[batch['neg'][ii, 0]])
            # add the row
            page.addRow(row)
    # render page
    page.savePage('visualize_batch.html')
def saveResultPage(loadPath):
    """Render a chat log (image, task, conversation, GT, pred) as an HTML page.

    The output path is derived from *loadPath* by swapping the 'json'
    extension for 'html' and 'chatlog' for 'chatpage'.
    """
    page = HTML(5)
    page.setTitle(['Image', 'Task', 'Conversation', 'GT', 'Pred'])
    savePath = loadPath.replace('json', 'html').replace('chatlog', 'chatpage')
    with open(loadPath, 'r') as fileId:
        talk = json.load(fileId)
    # remap a few attribute names for display
    maps = {'rectangle': 'triangle', 'rhombus': 'star', 'cyan': 'purple'}
    cleaner = lambda x: maps[x] if x in maps else x
    for datum in talk:
        for field in ('image', 'gt', 'pred', 'chat'):
            datum[field] = [cleaner(ii) for ii in datum[field]]
        row = [', '.join(datum['image']), ', '.join(datum['task'])]
        # format the (one- or two-round) conversation
        chat = 'Q1 : %3s \tA1: %s ' % (datum['chat'][0], datum['chat'][1])
        if len(datum['chat']) > 3:
            chat += '\tQ2 : %3s \t A2: %s' % (datum['chat'][2], datum['chat'][3])
        row.append(chat)
        # add GT and pred
        row.extend([', '.join(datum['gt']), ', '.join(datum['pred'])])
        page.addRow(row)
    # render and save page
    page.savePage(savePath)
def preload_auto_modes(self):
    """Return <option> markup, one line per AUTO / AUX_AUTO scenario mode."""
    h = HTML()
    options = [
        str(h.option("%s - %s" % (mode_id, description), value=mode_id))
        for mode_id, mode_type, description in _util.getScenarioModes(
            self.Scenario, ['AUTO', 'AUX_AUTO'])
    ]
    return "\n".join(options)
def buildTrivialStatsTable(self, deltaSeriesCollection, klass=TRIVIAL_STATS_TABLE, style=''):
    """
    Builds a html table reporting statistics for a collection of delta series

    :param deltaSeriesCollection: A series of elapsed time or pmc values for a pair of probes
    :param klass: Css selector for this table (Default value = TRIVIAL_STATS_TABLE)
    :param style: Css inline style attributes for this table (Default value = '')
    """
    wrapper = HTML().div()
    table = wrapper.table(border='1',
                          klass='{} {}'.format(TABLE_SUMMARY, klass),
                          style=style)
    self.buildStatsTableHeader(table)
    body = table.tbody
    for rowNum, series in enumerate(deltaSeriesCollection, 1):
        row = body.tr
        row.td('{0:,}'.format(rowNum), klass=TD_KEY)
        row.td(series.beginProbeName, klass=TD_KEY)
        row.td(series.endProbeName, klass=TD_KEY)
        # one formatted cell per statistic, in fixed column order
        for value in (series.getMin(),
                      series.getMax(),
                      series.getMedian(),
                      series.getMean(),
                      series.getPercentile(self.percentile1),
                      series.getPercentile(self.percentile2),
                      series.getStandardDeviation()):
            row.td(DURATION_FORMAT.format(value))
    return wrapper
def html_line(text, plus=0):
    """Write *text* (optionally suffixed with *plus*) as an HTML paragraph.

    :param text: value rendered as the paragraph body
    :param plus: optional suffix; appended after *text* when truthy

    NOTE(review): writes to a module-level file handle ``f`` — confirm it is
    open when this is called.
    """
    h = HTML()
    # BUG FIX: the suffix was previously concatenated without conversion,
    # raising TypeError for any non-string truthy value (e.g. plus=1).
    suffix = str(plus) if plus else ""
    h.p(str(text) + suffix)
    f.write(str(h))
def _WriteErrorReport(self, errorTable):
    """Render *errorTable* (lineId, message, detail) rows into the Emme logbook."""
    h = HTML()
    t = h.table()
    header = t.tr()
    for caption in ("Line ID", "Error Message", "Error Details"):
        header.th(caption)
    for lineId, errorMsg, errorDetail in errorTable:
        row = t.tr()
        row.td(lineId)
        row.td(errorMsg)
        row.td(str(errorDetail))
    pb = _m.PageBuilder(title="Error Report")
    headerText = "<b>Source Emmebank:</b> %s" % self.SourceEmmebankPath + \
                 "<br><b>Source Scenario:</b> %s" % self.SourceScenarioId + \
                 "<br><b>Target Scenario:</b> %s" % self.TargetScenario
    pb.add_text_element(headerText)
    pb.wrap_html(body=str(t))
    _m.logbook_write("Error report", value=pb.render())
def buildDifferentialStatsTable(self, deltaSeriesCollection, refDsc, klass, style):
    """
    Builds a table with statistics for current profile session side by side with benchmarks

    Each statistic cell shows the current value followed by its signed delta
    against the matching entry of the reference collection, css-classed by
    the delta's sign.

    :param deltaSeriesCollection: A series of elapsed time or pmc values for a pair of probes
    :param refDsc: Reference delta series collection for the current profile session
    :param klass: Css selector for this table (Default value = TRIVIAL_STATS_TABLE)
    :param style: Css inline style attributes for this table (Default value = '')
    """
    from xpedite.report import getDeltaMarkup, getDeltaType
    klass = '{} {}'.format(TABLE_SUMMARY, klass)
    table = HTML().table(border='1', klass=klass, style=style)
    self.buildStatsTableHeader(table)
    tbody = table.tbody
    # cell format: absolute value followed by "(sign delta)" vs benchmark
    fmt = DURATION_FORMAT + ' ({1}' + DURATION_FORMAT_2 + ')'
    for i, deltaSeries in enumerate(deltaSeriesCollection, 1):
        row = tbody.tr
        row.td('{0:,}'.format(i), klass=TD_KEY)
        row.td(deltaSeries.beginProbeName, klass=TD_KEY)
        row.td(deltaSeries.endProbeName, klass=TD_KEY)
        # min
        delta = deltaSeries.getMin() - refDsc[i - 1].getMin()
        row.td(fmt.format(deltaSeries.getMin(), getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # max
        delta = deltaSeries.getMax() - refDsc[i - 1].getMax()
        row.td(fmt.format(deltaSeries.getMax(), getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # median
        delta = deltaSeries.getMedian() - refDsc[i - 1].getMedian()
        row.td(fmt.format(deltaSeries.getMedian(), getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # mean
        delta = deltaSeries.getMean() - refDsc[i - 1].getMean()
        row.td(fmt.format(deltaSeries.getMean(), getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # configured percentiles
        percentile1 = deltaSeries.getPercentile(self.percentile1)
        delta = percentile1 - refDsc[i - 1].getPercentile(self.percentile1)
        row.td(fmt.format(percentile1, getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        percentile2 = deltaSeries.getPercentile(self.percentile2)
        delta = percentile2 - refDsc[i - 1].getPercentile(self.percentile2)
        row.td(fmt.format(percentile2, getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # standard deviation
        delta = deltaSeries.getStandardDeviation() - refDsc[
            i - 1].getStandardDeviation()
        row.td(fmt.format(deltaSeries.getStandardDeviation(),
                          getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
    return table
def buildEnvironmentTable(app):
    """
    Builds a table with environment details

    :param app: an instance of xpedite app, to interact with target application
    """
    table = HTML().table(border='1', klass=TABLE_ENV)
    headerRow = table.thead.tr
    for caption in ('No', 'Parameter', 'Value'):
        headerRow.th(caption)
    # parameter / value pairs, rendered in this order
    details = [
        ('application', app.name),
        ('report time', strftime('%Y-%m-%d %H:%M:%S', gmtime())),
        ('host', app.ip),
        ('pid', app.pid),
        ('user', getpass.getuser()),
        ('os', app.getOsUname()),
        ('os boot param', app.getBootParam()),
    ]
    body = table.tbody
    for rowNum, (param, value) in enumerate(details, 1):
        row = body.tr
        row.td('{:,}'.format(rowNum), klass=TD_KEY)
        row.td('{} '.format(param), klass=TD_KEY)
        row.td('{} '.format(value))
    return table
def _create_page(self, link_prefix=""):
    """Create the common page skeleton: title, stylesheet link and nav bar.

    :param link_prefix: path prefix prepended to every href (for nested pages)
    :return: the HTML document root
    """
    h = HTML('html', '')
    h.h1("Cloud: %s" % self.cloud_name)
    h.head().link(rel="stylesheet", type="text/css",
                  href="%sardana.css" % link_prefix)
    nav = h.h2()
    # navigation links, separated by runs of non-breaking spaces
    for label, target in (("Control Plane View", "Control_Planes.html"),
                          ("Region View", "Regions.html"),
                          ("Service View", "Services.html"),
                          ("Network View", "Networks.html"),
                          ("Server View", "Server_View.html"),
                          ("Server Groups View", "Server_Groups.html")):
        nav.a(label, href=link_prefix + target)
        nav.text(SP * 10, escape=False)
    h.br
    return h
def main(args):
    """Build an HTML gallery of the images under web/<img_dir>/<subdir>."""
    win_width = 300
    win_height = 300
    webpage = HTML('web/', args.title)
    webpage.add_header(args.title)
    for i, dir in tqdm(enumerate(os.listdir('web/' + args.img_dir))):
        # collect and sort the image paths for this sub-directory
        imgs = sorted(
            os.path.join('{}/{}'.format(args.img_dir, dir), image_path)
            for image_path in os.listdir('web/{}/{}'.format(args.img_dir, dir)))
        # caption by file-name convention: 0.jpg original, 1.jpg background
        txts = []
        for img_path in imgs:
            basename = img_path.split('/')[-1]
            if basename == '0.jpg':
                txts.append('Original Image')
            elif basename == '1.jpg':
                txts.append('Background')
            else:
                txts.append('Completed Object')
        webpage.add_images(imgs, txts, imgs, i + 1,
                           width=win_width, height=win_height)
    webpage.save()
def gen_html_report(crash_dict, apk_file, simple_log):
    """Write a monkey-test crash summary plus log dump to monkey_test_report.html.

    Python 2 (iteritems).
    """
    h = HTML()
    h.h2('Monkey Test Report')
    h.li(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    h.li(apk_file)
    h.br
    # crash summary table: message -> occurrence count
    crash_table = h.table(border='1')
    header = crash_table.tr
    header.th('Crash Long Msg')
    header.th('Crash Times')
    for message, times in crash_dict.iteritems():
        row = crash_table.tr
        row.td(message)
        row.td(str(times))
    # raw log lines
    h.h6('Logs')
    h.pre()
    for entry in simple_log:
        h.code(str(entry))
    h.pre()
    with open('monkey_test_report.html', 'w') as report:
        report.write(str(h))
def create_binary_correlation_stat_html(self, output_dir, roidb=None): from html import HTML # Create the directory if necessary if not osp.exists(output_dir): os.makedirs(output_dir) present_cache_file = osp.join(self.cache_path, self.name + '_present_stats.pkl') assert os.path.exists(present_cache_file) with open(present_cache_file, 'rb') as fid: present_stats = cPickle.load(fid) print '{} present stats loaded from {}'.format(self.name, present_cache_file) config_html = HTML() config_table = config_html.table(border='1') for i in xrange(self.num_classes): r = config_table.tr if i == 0: r.th('---') else: r.th('%s' % self.classes[i]) for j in xrange(1, self.num_classes): c = r.td if i == 0: c.a('%s' % self.classes[j]) else: c.a('%d' % int(present_stats[i, j]), href='images/%02d_%02d.jpg' % (i, j)) html_file = open(osp.join(output_dir, 'coco_offsets_table.html'), 'w') print >> html_file, config_table html_file.close()
def buildFlotTitle(category, title, timelineStats, uid):
    """
    Builds markup to render title for txn visualizations

    :param category: Category of transactions visualized by this flot
    :param title: Text title for this visualization
    :param timelineStats: Timeline stats with delta series to be plotted
    :param uid: Unique identifier to generate css selector id
    """
    constituentNames = []
    for deltaSeries in timelineStats.getTscDeltaSeriesCollection():
        constituentNames.append('{} -> {}'.format(deltaSeries.beginProbeName,
                                                  deltaSeries.endProbeName))
    suffix = ' for constituent - ' if constituentNames else ''
    titleText = '{} {} charts {}'.format(category, title, suffix)
    element = HTML().div(klass=TIME_POINT_STATS_TITLE)
    element.h3(titleText, style='display: inline')
    if constituentNames:
        selector = element.select(id='{}ConstituentSelector'.format(uid),
                                  klass=SELECTOR)
        lastIndex = len(constituentNames) - 1
        for index, name in enumerate(constituentNames):
            if index == lastIndex:
                # pre-select the last constituent
                selector.option(name, selected='selected')
            else:
                selector.option(name)
    return element
def make_html(text):
    """Convert plain text into a paragraph element with one <br> per line."""
    page = HTML()
    paragraph = page.p
    for line in text.splitlines():
        paragraph += line
        paragraph.br
    return paragraph
def page(self):
    """Build the TMG Toolbox index page.

    Tools are bucketed by the first letter of their name; each letter gets
    its own table of (category, linked tool name, short description) rows.
    Python 2 only (iterkeys).

    :return: the rendered page html
    """
    pb = _tmgTPB.TmgToolPageBuilder(
        self, title="TMG Toolbox Index",
        description="Lists all tools and libraries within the TMG Toolbox, \
        alphabetically by tool name, with links to each tool.",
        branding_text="- TMG Toolbox", runnable=False)
    tmg = [tb for tb in _MODELLER.toolboxes if tb.namespace() == 'tmg'][0]
    toolNames = self.get_tool_names(tmg)
    topCategories = self.get_top_categories(tmg)
    # bucket (name, namespace) pairs by the upper-cased first letter
    alphabetizedToolNames = {}
    for name, namespacce in toolNames:
        firstChar = name[0].upper()
        if firstChar in alphabetizedToolNames:
            alphabetizedToolNames[firstChar].append((name, namespacce))
        else:
            alphabetizedToolNames[firstChar] = [(name, namespacce)]
    orderedKeys = [key for key in alphabetizedToolNames.iterkeys()]
    orderedKeys.sort()
    for firstChar in orderedKeys:
        #pb.add_header(firstChar)
        toolNames = alphabetizedToolNames[firstChar]
        h = HTML()
        t = h.table(style='border-style:none;', width='100%')
        tr = t.tr()
        tr.th(firstChar, colspan='3', align='left')
        for name, namespace in toolNames:
            # Get description from the code, when the tool provides one
            tool = _MODELLER.tool(namespace)
            if hasattr(tool, 'short_description'):
                description = tool.short_description()
            else:
                description = "<em>--No description--</em>"
            # Determine the top-level category from the namespace
            topNamespace = namespace.split('.')[1]
            if topNamespace in topCategories:
                category = topCategories[topNamespace]
            else:
                continue  # Skip top-level tool
            # Add data to table: category | modeller link | description
            tr = t.tr()
            tr.td("<em>%s</em>" % category, escape=False, width='20%')
            link = '<a data-ref="%s" class="-inro-modeller-namespace-link" style="text-decoration: none;">' % namespace
            link += name + "</a>"
            tr.td(link, escape=False, width='40%')
            tr.td(description, escape=False, align='left')
        pb.wrap_html(body=str(t))
    return pb.render()
def buy_used(self, buy_info, used_info):
    """Compare purchased vs used instance-type counts, rendered as HTML.

    Builds a three-row table (Type / Purd / Used); green cells hold counts,
    red 'Null' cells mark missing entries. ``alarm_num`` counts types used
    more than purchased, or used without any purchase.

    NOTE(review): both parameters are immediately overwritten from the
    globals ``all_type_list`` / ``all_type_pur`` — the arguments are
    effectively ignored; confirm whether that is intentional.

    :return: dict with 'status' (alarm count as str) and 'report' (html str)
    """
    alarm_num = 0
    used_info = Counter(all_type_list)
    buy_info = Counter(all_type_pur)
    # union of both counters: every type seen anywhere
    all_info = Counter(used_info + buy_info)
    from html import HTML
    # per-cell inline styles: green = present, red = missing
    inline_css = {
        'class1': 'color:#00FF00;width:500;valign:middle;vertical-line:top;',  #green
        'class2': 'color:#FF0000;width:500;valign:middle;vertical-line:top;',  #red
        'class3': 'color:#FFFF00;width:500;valign:middle;vertical-line:top;',  #yellow
        'class4': 'color:#000000;width:500;valign:middle;vertical-line:top;',  #black
    }
    b = HTML()
    t = b.table(border='1px solid black')
    # three parallel rows filled column-by-column in the loop below
    r = t.tr()
    t2 = t.tr()
    t3 = t.tr()
    r.td('Type', style=inline_css['class4'])
    t2.td('Purd', style=inline_css['class4'])
    t3.td('Used', style=inline_css['class4'])
    for m_title in list(all_info):
        r.td(str(m_title), style=inline_css['class4'])
        if m_title in dict(buy_info).keys():
            t2.td(str(dict(buy_info)[m_title]), style=inline_css['class1'])
        else:
            t2.td('Null', style=inline_css['class2'])
        if m_title in dict(used_info).keys():
            t3.td(str(dict(used_info)[m_title]), style=inline_css['class1'])
        else:
            t3.td('Null', style=inline_css['class2'])
        # alarm when a type is used more than bought, or used without purchase
        if m_title in dict(buy_info).keys() and m_title in dict(
                used_info).keys():
            if dict(buy_info)[m_title] < dict(used_info)[m_title]:
                alarm_num = alarm_num + 1
        elif m_title not in dict(buy_info).keys() and m_title in dict(
                used_info).keys():
            alarm_num = alarm_num + 1
        # elif m_title in dict(buy_info).keys() and m_title not in dict(used_info).keys():
        #     print 'buy but not used: %s' % m_title
    alarm_info_ec2 = {'status': str(alarm_num), 'report': str(b)}
    return alarm_info_ec2
def makeReport(DbMan, target_list, exclude_list, openvas_enabled):
    """Assemble the HTML scan report for targets and excluded hosts.

    Python 2 only (``except Exception, e``).

    :param DbMan: database manager exposing getHostInfo(host)
    :param target_list: comma-separated host list ('' means none)
    :param exclude_list: comma-separated excluded-host list ('' means none)
    :param openvas_enabled: True when Openvas results are part of the scan
    :return: the report markup as a string, or None when an error was logged
    """
    try:
        tar_result = []
        exclude_result = []
        logger.debug("Targets: " + str(target_list))
        logger.debug("Excludes: " + str(exclude_list))
        # collect db info per target; None signals "no targets at all"
        if not target_list == "":
            for target in target_list.split(','):
                logger.debug("Target info: " + str(target))
                result = DbMan.getHostInfo(target)
                logger.debug("Target info in db: " + str(result))
                tar_result.append(result)
        else:
            tar_result = None
        if not exclude_list == "":
            for exclude in exclude_list.split(','):
                exclude_result.append(DbMan.getHostInfo(exclude))
        else:
            exclude_result = None
        logger.debug("Tar Array: " + str(tar_result))
        logger.debug("Exclude Array: " + str(exclude_result))
        # render each host list into a table, when present
        if not tar_result is None:
            logger.debug("Making targets table")
            tar_tab = makeTable(tar_result)
            logger.debug("Targets Table: " + str(tar_tab))
        else:
            tar_tab = None
        if not exclude_result is None:
            logger.debug("Making Excluded Table")
            exclude_tab = makeTable(exclude_result)
            logger.debug("Excluded Table: " + str(exclude_tab))
        else:
            exclude_tab = None
        logger.debug("Exclude Table Value:" + str(exclude_tab))
        # assemble the page: intro line, tables, footer
        h = HTML('html')
        po = h.p(escape=False)
        if openvas_enabled:
            po += "<b>This report was generated based on NMAP and Openvas Scanning</b><br />"
        else:
            po += "<b>This report was generated based on NMAP Scanning</b><br />"
        if not tar_tab is None:
            p = h.p(escape=False)
            p += "<b>Targets Scanned:</b><br />"
            p += tar_tab
        if not exclude_tab is None:
            p2 = h.p(escape=False)
            p2 += "<br /><b>Hosts Excluded:</b><br />"
            p2 += exclude_tab
        p3 = h.p(escape=False)
        p3 += "<br />This message is automatically generated. Report errors to the Office of Information Security.<br />"
        return str(h)
    # broad catch is deliberate: report generation must not crash the caller
    except Exception, e:
        logger.warning("The following error occured during report creation: " + str(e))
def buildEnvironmentReportFile(self, app, repo, resultOrder, classifier, txnFilter, benchmarkPaths):
    """
    Creates a file to store the markup for environment details

    :param app: an instance of xpedite app, to interact with target application
    :param repo: repository of transactions for current profiling sessions and benchmarks
    :param resultOrder: Sort order of transactions in latency constituent reports
    :param classifier: Predicate to classify transactions into different categories
    :param txnFilter: Lambda to filter transactions prior to report generation
    :param benchmarkPaths: List of stored reports from previous runs, for benchmarking

    :return: complete tabbed (Host Info / Profile Info) html report string
    """
    from xpedite.report.profileInfo import ProfileInfoReportBuilder
    from xpedite.report.tabs import (TAB_HEADER_FMT, TAB_BODY_FMT,
                                     TAB_BODY_PREFIX, TAB_BODY_SUFFIX, TAB_JS,
                                     TAB_CONTAINER_FMT, tabState,
                                     tabContentState)
    envTable = self.buildEnvironmentTable(app)
    cpuInfoTable = self.buildCpuInfoTable(app)
    # concatenate each available table beneath its own h3 title
    hostReport = ''
    if envTable:
        title = HTML().h3('Test Environment parameters')
        hostReport += str(title) + str(envTable)
    if cpuInfoTable:
        title = HTML().h3('Test Environment cpu info')
        hostReport += str(title) + str(cpuInfoTable)
    profileReport = ProfileInfoReportBuilder().buildProfileInfoReportFile(
        app, repo, resultOrder, classifier, txnFilter, benchmarkPaths)
    # two tabs: Host Info (initially active) and Profile Info
    tabHeader = TAB_HEADER_FMT.format('hostInfo', tabState(True), 'Host Info')
    tabHeader += TAB_HEADER_FMT.format('profileInfo', tabState(False), 'Profile Info')
    envBodyClass = 'envInfoBody '
    tabBody = TAB_BODY_FMT.format('hostInfo', envBodyClass + tabContentState(True), hostReport)
    tabBody += TAB_BODY_FMT.format('profileInfo', envBodyClass + tabContentState(False), profileReport)
    tabBody = TAB_BODY_PREFIX + tabBody + TAB_BODY_SUFFIX
    report = (HTML_BEGIN + TAB_CONTAINER_FMT.format(tabHeader, tabBody) +
              TAB_JS + HTML_END)
    return report
def write_story(self):
    """Serialize the story to prettified HTML.

    Builds a div.Story containing one div.Page per page; each page holds its
    (ASCII-normalized) text plus one div.Choice per choice. Closing tags are
    injected manually via HTML('/div') etc., since elements are opened
    without nesting. Python 2 only (``unicode``).

    :return: pretty-printed HTML string
    """
    # This is where we add the tags
    h = HTML()
    h.html()
    h.body()
    h.div(klass="Story", id=self.title)
    for (x, p) in enumerate(self.pages):
        h.div(klass="Page", value=str(x))
        if isinstance(p.text, unicode):
            # strip non-ASCII: NFKD-normalize, then drop what won't encode
            h += HTML(text=unicodedata.normalize('NFKD', p.text).encode(
                'ascii', 'ignore'))
        else:
            h += HTML(text=p.text)
        for (k, choice) in enumerate(p.choices):
            if isinstance(choice, unicode):
                choice = unicodedata.normalize('NFKD', choice).encode(
                    'ascii', 'ignore')
            h.div(choice, klass="Choice", value=str(k))
        # close the Page div
        h += HTML('/div')
        pass
    # close Story, body and html
    h += HTML('/div')
    h += HTML('/body')
    h += HTML('/html')
    soup = bs(str(h))
    return soup.prettify()
def __init__(self, name, local_jquery=None, local_requirejs=None,
             local_d3_js=None, local_c3_js=None, local_c3_css=None):
    """Initialize a c3.js chart bound to a <div> with id *name*.

    Each ``local_*`` argument overrides the corresponding CDN asset path;
    when omitted, public CDN urls are used. Builds the html scaffold (chart
    div, stylesheet link, jquery/require script tags) and the configuration
    dict assembled from fresh component objects.

    :param name: element id the chart binds to ('#<name>')
    :param local_jquery: optional path to a local jquery build
    :param local_requirejs: optional path to a local require.js build
    :param local_d3_js: optional path to a local d3 build (no .js suffix)
    :param local_c3_js: optional path to a local c3 build (no .js suffix)
    :param local_c3_css: optional path to a local c3 stylesheet
    """
    super(Chart, self).__init__()
    self.chart_html = HTML()
    self.name = name
    # asset paths: local override or CDN default
    self.c3_css_path = local_c3_css or 'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min.css'
    self.jquery_path = local_jquery or 'http://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js'
    self.requirejs_path = local_requirejs or \
        'https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.17/require.min.js'
    self.d3_js_path = local_d3_js or 'http://d3js.org/d3.v3.min'
    self.c3_js_path = local_c3_js or 'https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min'
    # html scaffold: chart container, stylesheet, then script tags
    self.chart_html.div('', id=self.name)
    self.chart_html.link('', href=self.c3_css_path, rel='stylesheet',
                         type='text/css')
    self.chart_html.script('', src=self.jquery_path)
    self.chart_html.script('', src=self.requirejs_path)
    # d3/c3 are loaded through require.js
    self.requirejs_config = {
        'paths': {
            'c3': self.c3_js_path,
            'd3': self.d3_js_path,
        }
    }
    # chart sub-components, wired together where they share the axes
    self.axes = Axes()
    self.data = Data(self.axes)
    self.grid = Grid(self.axes)
    self.legend = Legend()
    self.tooltip = Tooltip()
    self.regions = Regions(self.axes)
    self.point = Point()
    self.size = Size()
    self.padding = Padding()
    # aggregate config consumed when generating the c3 chart
    self.chart_dict = {
        'bindto': "'#" + self.name + "'",
        'data': self.data.config,
        'axis': self.axes.config,
        'grid': self.grid.config,
        'legend': self.legend.config,
        'tooltip': self.tooltip.config,
        'regions': self.regions.config,
        'point': self.point.config,
        'size': self.size.config,
        'padding': self.padding.config,
    }
def render_list(lst, h=None):
    """Recursively render a (possibly nested) list of elements into *h*.

    Each non-list element is dispatched through ``ent_swch`` by its ``tag``
    (falling back to ``default_handler``); nested lists recurse into the
    current target element.

    :param lst: possibly nested list of tagged elements
    :param h: target HTML element; a fresh HTML() is created when omitted
    :return: the populated HTML element
    """
    # BUG FIX: the default argument used to be a mutable HTML() instance
    # created once at definition time, so successive calls without an
    # explicit `h` accumulated output into the same shared document.
    # Create a fresh document per call instead.
    if h is None:
        h = HTML()
    t = h
    for el in lst:
        if isinstance(el, list):
            render_list(el, t)
        else:
            f = ent_swch.get(el.tag, default_handler)
            t = f(el, h)
    return h
def buildDiffTitle(lhs, rhs):
    """Builds the heading element for the transaction diff table"""
    from xpedite.report.markup import TIME_POINT_STATS_TITLE
    container = HTML().div(klass=TIME_POINT_STATS_TITLE)
    container.h3('Transaction diff\ntxn #{} vs txn #{}'.format(lhs, rhs))
    return container
def _AddReportSection(self, pb, type, modes, fountains, sinks, orphans):
    """Append a report section listing fountain, sink and orphan nodes.

    :param pb: page builder receiving the wrapped html
    :param type: label describing what produced these results
    :param modes: mode identifiers this section covers
    :param fountains: fountain nodes found (may be empty)
    :param sinks: sink nodes found (may be empty)
    :param orphans: orphan nodes found (may be empty)
    """
    modes = [str(mode) for mode in modes]
    h = HTML()
    plural = 's' if len(modes) > 1 else ''
    sectionTitle = "{0} results for mode{1} {2!s}".format(type, plural, modes)

    # IMPROVEMENT: the three node lists were rendered by three verbatim
    # copies of the same table-building logic; factor it into one helper.
    def addNodeTable(nodes, label):
        """Emit one table titled 'Found N <label> node(s):' plus a row per node."""
        count = len(nodes)
        if count == 0:
            return
        suffix = 's' if count > 1 else ''
        t = h.table()
        t.tr().th("Found %s %s node%s:" % (count, label, suffix))
        for node in nodes:
            t.tr().td(str(node))

    addNodeTable(fountains, 'fountain')
    addNodeTable(sinks, 'sink')
    addNodeTable(orphans, 'orphan')
    pb.wrap_html(sectionTitle, body=str(h))
def buildDiffTable(self, lhs, rhs):  # pylint: disable=too-many-locals
    """
    Constructs the HTML table to show the diff of 2 transactions

    :param lhs: The timeline from conflating the two input timelines
    :type lhs: xpedite.analytics.timeline.Timeline
    :param rhs: The timeline retrieved from the second transaction ID input in the Jupyter command
    :type rhs: xpedite.analytics.timeline.Timeline
    :return: diff title markup followed by the table markup, as a string
    """
    from xpedite.report.markup import (DURATION_FORMAT, DURATION_FORMAT_2,
                                       DELTA_FORMAT_1, DELTA_FORMAT_2, TD_KEY,
                                       TABLE_SUMMARY, TRIVIAL_STATS_TABLE,
                                       getDeltaMarkup, getDeltaType)
    klass = '{} {}'.format(TABLE_SUMMARY, TRIVIAL_STATS_TABLE)
    table = HTML().table(border='1', klass=klass)
    diffReport = str(self.buildDiffTitle(lhs.txnId, rhs.txnId))
    self.buildDiffTableHeader(rhs, table)
    tbody = table.tbody
    for i in range(0, len(lhs) - 1):
        # value followed by "(sign delta)" against the conflated timeline
        durationFmt = DURATION_FORMAT + ' ({1}' + DURATION_FORMAT_2 + ')'
        deltaFmt = DELTA_FORMAT_1 + ' ({1}' + DELTA_FORMAT_2 + ')'
        row = tbody.tr
        # BUG FIX: klass was previously passed as a keyword to str.format
        # (which silently ignores unused keyword arguments) instead of to
        # td(), so the probe-name cells never received the TD_KEY css class.
        row.td('{}'.format(rhs[i].name), klass=TD_KEY)
        row.td('{}'.format(rhs[i + 1].name), klass=TD_KEY)
        delta = rhs[i].duration - lhs[i].duration
        row.td(durationFmt.format(rhs[i].duration, getDeltaMarkup(delta), delta),
               klass=getDeltaType(delta))
        # pmc deltas, when the probe pair carries any
        if rhs[i].deltaPmcs:
            for j, delta in enumerate(rhs[i].deltaPmcs):
                txnDelta = delta - lhs[i].deltaPmcs[j]
                row.td(deltaFmt.format(delta, getDeltaMarkup(txnDelta), txnDelta),
                       klass=getDeltaType(txnDelta))
        # topdown metric deltas, when present
        if rhs[i].topdownValues:
            for j, topdownValue in enumerate(rhs[i].topdownValues):
                delta = (rhs[i].topdownValues[j].value -
                         lhs[i].topdownValues[j].value)
                row.td(durationFmt.format(topdownValue.value,
                                          getDeltaMarkup(delta), delta),
                       klass=getDeltaType(delta))
    diffReport += str(table)
    return diffReport