def dataTable(headings, content, sortable=False, tabid=None):
    """
    Build a tabular listing, either sortable or plain.

    For a sortable table, ``headings`` may be a list of
    (colformat, label) tuples; a plain list of labels falls back to the
    'istr' column format for every cell.  An empty ``content`` renders a
    single "No entries." row spanning all columns.
    """
    if sortable:
        tclass = 'sortable'
        if isinstance(headings[0], tuple):
            header = []
            for colformat, label in headings:
                header.append(tags.th(colformat=colformat)[label])
        else:
            header = [tags.th(colformat='istr')[label] for label in headings]
    else:
        tclass = 'listing'
        header = [tags.th[label] for label in headings]

    if content:
        rows = []
        for row in content:
            cells = [tags.td[cell] for cell in row]
            rows.append(tags.tr[cells])
    else:
        # Placeholder row spanning the full width of the table.
        rows = tags.tr[tags.td(colspan=len(headings))[tags.em["No entries."]]]

    return tags.table(id=tabid, cellspacing=0, _class=tclass)[
        tags.thead(background="/images/gradMB.png")[tags.tr[header]],
        tags.tbody[rows]]
def test_athenaIdRewriting(self):
    """
    Test that IDs are correctly rewritten in id, for, and headers attributes.
    """
    # One element per attribute case: a label/input pair, an empty headers
    # attribute, a single token, and multi-token headers values.
    tag = [tags.label(_for='foo'),
           tags.input(id='foo'),
           tags.th(headers=''),
           tags.th(headers='foo'),
           tags.td(headers='foo bar'),
           tags.td(headers='foo bar baz')]
    element = athena.LiveElement(docFactory=loaders.stan(tag))
    page = athena.LivePage(docFactory=loaders.stan(element))
    element.setFragmentParent(page)

    def _verifyRendering(result):
        # Each id/for/headers token must be prefixed with
        # "athenaid:<element._athenaID>-".
        self.assertIn('<input id="athenaid:%s-foo"' % (element._athenaID,),
                      result)
        self.assertIn('<label for="athenaid:%s-foo"' % (element._athenaID,),
                      result)
        # An empty headers attribute is left untouched.
        self.assertIn('<th headers=""', result)
        self.assertIn('<th headers="athenaid:%s-foo"' % (
            element._athenaID,), result)
        # Multi-token headers values have every token rewritten.
        self.assertIn('<td headers="athenaid:%s-foo athenaid:%s-bar"' % (
            element._athenaID, element._athenaID), result)
        self.assertIn(
            '<td headers="athenaid:%s-foo athenaid:%s-bar athenaid:%s-baz"' % (
                element._athenaID, element._athenaID, element._athenaID),
            result)

    return renderLivePage(page).addCallback(_verifyRendering)
def table(updatingMeasurements):
    """
    Render freeway measurements near my strip of US-101 as an HTML table,
    one row per station, with N and S directions in separate column groups.
    """
    # Keep 101 stations in the postmile window (a little beyond my strip,
    # to see approaching traffic), ordered by absolute postmile.
    nearby = sorted(
        (float(m['abs_pm']), m)
        for m in updatingMeasurements.measurements
        if m['freeway_id'] == '101' and 408 < float(m['abs_pm']) < 412.5)

    rows = [
        T.tr[T.th(colspan=4, class_="dir-N")['North'],
             T.th(colspan=4, class_="dir-S")['South']],
        T.tr[T.th['fwy'], T.th['postmile'], T.th['name'], T.th['speed'],
             T.th['fwy'], T.th['postmile'], T.th['name'], T.th['speed']],
    ]
    for _, m in nearby:
        dirAttr = {'class_': "dir-%s" % m['freeway_dir']}
        cells = [T.td(**dirAttr)[m['freeway_id'] + m['freeway_dir']]]
        for field in ('abs_pm', 'name', 'speed'):
            cells.append(T.td(**dirAttr)[m[field]])
        # Pad the opposite direction's half of the row with one wide cell.
        if m['freeway_dir'] == 'N':
            cells = cells + [T.td(colspan=4)]
        else:
            cells = [T.td(colspan=4)] + cells
        rows.append(T.tr[cells])
    return T.table[rows]
def table(updatingMeasurements):
    """
    Build the traffic table for my section of the 101: header rows for the
    two directions, then one data row per measurement sorted by postmile.
    """
    def inWindow(m):
        # Slightly wider than my strip so approaching traffic shows up.
        return m['freeway_id'] == '101' and 408 < float(m['abs_pm']) < 412.5

    meas = [(float(m['abs_pm']), m)
            for m in updatingMeasurements.measurements if inWindow(m)]
    meas.sort()

    headerCells = [T.th['fwy'], T.th['postmile'], T.th['name'], T.th['speed'],
                   T.th['fwy'], T.th['postmile'], T.th['name'], T.th['speed']]
    rows = [T.tr[T.th(colspan=4, class_="dir-N")['North'],
                 T.th(colspan=4, class_="dir-S")['South']],
            T.tr[headerCells]]

    for _, m in meas:
        attrs = dict(class_="dir-%s" % m['freeway_dir'])
        data = [T.td(**attrs)[m['freeway_id'] + m['freeway_dir']],
                T.td(**attrs)[m['abs_pm']],
                T.td(**attrs)[m['name']],
                T.td(**attrs)[m['speed']]]
        spacer = [T.td(colspan=4)]
        northbound = m['freeway_dir'] == 'N'
        # Northbound data sits in the left column group, southbound in the
        # right; the other group is filled by the 4-column spacer.
        rows.append(T.tr[data + spacer if northbound else spacer + data])
    return T.table[rows]
def render_bayeux_environments(self):
    """
    Render the Bayeux environment settings (port and engine, read from
    lumen.config) as a heading plus a two-row table.
    """
    return [
        # Fixed typo: heading previously read 'Bayeux Environmerts'
        # (matches the 'Web console Environments' heading elsewhere).
        tags.h3()['Bayeux Environments'],
        tags.table()[
            tags.tr()[
                tags.th()['Port'],
                tags.td()[lumen.config['port']],
            ],
            tags.tr()[
                tags.th()['Engine'],
                tags.td()[lumen.config['engine']],
            ]
        ],
    ]
def formatHeader(self, columns: [str]) -> bytes:
    """
    Remember *columns*, append a header row (one <th> per column) to
    self.header, and return the flattened markup for that row.
    """
    self.columns = columns
    cells = []
    for label in self.columns:
        cells.append(tags.th()[label])
    headerRow = tags.tr()[cells]
    self.header.children.append(headerRow)
    return flat.flatten(headerRow)
def render_channels_list(self):
    """
    Render a table of all known channels: channel id and subscriber count.
    """
    channelsTable = tags.table()
    channelsTable.children.append(
        tags.tr()[
            tags.th()['id'],
            tags.th()['subscribers'],
        ])
    for _, chan in channel.channels.items():
        row = tags.tr()[
            tags.td()[chan.id],
            tags.td()[len(chan.subscribers)],
        ]
        channelsTable.children.append(row)
    return channelsTable
class HeadCells(rend.Page, HeadCellsMixin):
    # Renders the header row of a service table: one vertical <th> per
    # field definition, using the "headCell" renderer supplied by
    # HeadCellsMixin and the "fielddefs" data directive.
    def __init__(self, serManager):
        self.serManager = serManager

    # NOTE(review): reconstructed as a class attribute (the conventional
    # nevow rend.Page layout) — confirm it was not a local in __init__.
    docFactory = loaders.stan(
        T.tr(data=T.directive("fielddefs"), render=rend.sequence)[
            T.th(pattern="item", render=T.directive("headCell"),
                 class_="thVertical")])
def render_clients_list(self):
    """
    Render a table listing every connected client: id, type name, and a
    human-readable creation timestamp.
    """
    clientsTable = tags.table()
    clientsTable.children.append(
        tags.tr()[
            tags.th()['id'],
            tags.th()['type'],
            tags.th()['created at'],
        ])
    for clientId, cl in client.clients.items():
        cells = [
            tags.td()[clientId],
            tags.td()[cl.typename],
            tags.td()[cl.createdAt.strftime("%A, %d. %B %Y %I:%M%p")],
        ]
        clientsTable.children.append(tags.tr()[cells])
    return clientsTable
def render_webconsole_environments(self):
    """
    Render the web console environment settings (port, read from
    lumen.config) as a heading plus a one-row table.
    """
    portRow = tags.tr()[
        tags.th()['Port'],
        tags.td()[lumen.config['webport']],
    ]
    return [
        tags.h3()['Web console Environments'],
        tags.table()[portRow],
    ]
def render_statistics(self, ctx, data):
    """
    Render the Statistics service plugin's counters as a three-column
    table: label, current value, description.
    """
    stats = self.pytrans.serviceplugins['Statistics'].stats
    ret = T.table(border=0, width="100%", cellspacing=5, cellpadding=2)
    for key in stats:
        # Label and description come from the language catalog.
        label = lang.get("statistics_%s" % key, config.lang)
        description = lang.get("statistics_%s_Desc" % key, config.lang)
        ret[T.tr[T.th(align="right")[label + ":"],
                 T.td[stats[key]],
                 T.td[description]]]
    return ret
def render_statistics(self, ctx, data):
    """
    Build a table of the Statistics service plugin's counters, one row per
    statistic: right-aligned label, value, and description.
    """
    statsTable = tags.table(border=0, width="100%", cellspacing=5,
                            cellpadding=2)
    plugin = self.pytrans.serviceplugins['Statistics']
    for key in plugin.stats:
        # Localised label and description for this counter.
        labelText = lang.get("statistics_%s" % key, config.lang) + ":"
        descText = lang.get("statistics_%s_Desc" % key, config.lang)
        statsTable[
            tags.tr[
                tags.th(align="right")[labelText],
                tags.td[plugin.stats[key]],
                tags.td[descText],
            ]
        ]
    return statsTable
def makeDocFactory(self):
    # Builds the stan document for this page: a warning div, a key/value
    # table in which every column definition contributes a header/value
    # row followed by a full-width description row, then a footnotes
    # section.
    return loaders.stan([
        T.div(render=T.directive("meta"), class_="warning")["_warning"],
        T.table(class_="keyvalue", render=rend.mapping,
                data=T.directive("firstrow"))[
            # Pair each column definition with its pre-built default <td>;
            # each pair yields two <tr>s (value row + description row).
            [[T.tr[
                T.th(data=colDef, render=T.directive("headCell"),
                     class_="thHorizontal"),
                td],
              T.tr(class_="keyvaluedesc")[T.td(colspan=2)[
                  colDef.description]]]
             for colDef, td in zip(self.serManager.table.tableDef.columns,
                                   self.defaultTds)]],
        T.invisible(render=T.directive("footnotes")),
    ])
def recentReviews():
    # Fetch the ten most recent reviews by <people/drewp> from the SPARQL
    # endpoint and render them as an HTML table (flattened to a string).
    # NOTE(review): line breaks inside the query literal are reconstructed;
    # SPARQL is whitespace-insensitive, so the query meaning is unchanged.
    result = fetch("""
        PREFIX rev: <http://purl.org/stuff/rev#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        SELECT ?thing ?name ?review ?createdOn ?rating
        WHERE {
          ?thing rdfs:label ?name ;
                 rev:hasReview ?review .
          ?review rev:reviewer <people/drewp> ;
                  rev:createdOn ?createdOn ;
                  rev:rating ?rating .
        }
        ORDER BY DESC(?createdOn)
        LIMIT 10
    """)
    et = fromstring(result)
    headers = [e.get('name') for e in et.find(SPARQL_RESULTS + 'head')]
    rows = []
    # Each SPARQL result becomes a dict mapping variable name -> value.
    for result in et.find(SPARQL_RESULTS + 'results').getchildren():
        bindings = dict([(b.get('name').replace('?',''),
                          nodeElement(b.getchildren()))
                         for b in result.findall(SPARQL_RESULTS + 'binding')])
        rows.append(bindings)
    # Newest first (re-sorted client-side in addition to the ORDER BY).
    rows.sort(key=lambda row: row['createdOn'], reverse=True)
    return flat.ten.flatten(T.table(class_="recentReviews")[
        T.tr[T.th(class_="recentReviews title", colspan=3)[
            "Recent reviews on ",
            T.a(class_="recentReviews", href="http://revyu.com")["revyu.com"],
            #" (-tmp)"
        ]],
        T.tr[T.th["Date"], T.th["Review"], T.th["Rating"]],
        # Date column shows only the date part of the xsd:dateTime value.
        [T.tr[T.td(class_="date")[row['createdOn'].split('T')[0]],
              T.td(class_="subj")[T.a(href=row['review'])[row['name']]],
              T.td(class_="rate")[row['rating']]]
         for row in rows]
    ])
def recentReviews():
    # Query the SPARQL endpoint for the ten newest reviews by
    # <people/drewp>, then flatten them into an HTML table string.
    # NOTE(review): the query literal's internal line breaks are
    # reconstructed; SPARQL ignores insignificant whitespace.
    result = fetch("""
        PREFIX rev: <http://purl.org/stuff/rev#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        SELECT ?thing ?name ?review ?createdOn ?rating
        WHERE {
          ?thing rdfs:label ?name ;
                 rev:hasReview ?review .
          ?review rev:reviewer <people/drewp> ;
                  rev:createdOn ?createdOn ;
                  rev:rating ?rating .
        }
        ORDER BY DESC(?createdOn)
        LIMIT 10
    """)
    et = fromstring(result)
    headers = [e.get('name') for e in et.find(SPARQL_RESULTS + 'head')]
    rows = []
    # One dict of variable-name -> value per result row.
    for result in et.find(SPARQL_RESULTS + 'results').getchildren():
        bindings = dict([(b.get('name').replace('?', ''),
                          nodeElement(b.getchildren()))
                         for b in result.findall(SPARQL_RESULTS + 'binding')])
        rows.append(bindings)
    # Newest first.
    rows.sort(key=lambda row: row['createdOn'], reverse=True)
    return flat.ten.flatten(
        T.table(class_="recentReviews")
        [T.tr[T.th(class_="recentReviews title", colspan=3)[
            "Recent reviews on ",
            T.a(class_="recentReviews", href="http://revyu.com")["revyu.com"],
            #" (-tmp)"
        ]],
         T.tr[T.th["Date"], T.th["Review"], T.th["Rating"]],
         # Only the date portion of the xsd:dateTime is shown.
         [
             T.tr[T.td(class_="date")[row['createdOn'].split('T')[0]],
                  T.td(class_="subj")[T.a(href=row['review'])[row['name']]],
                  T.td(class_="rate")[row['rating']]] for row in rows
         ]])
def _render_results(self, ctx, cr):
    """
    Render an ICheckResults object as a <ul> of findings: the check
    report, share counts, corrupt shares, the good-share map, version
    counters, and server share balancing in permuted order.
    """
    assert ICheckResults(cr)
    c = self.client
    sb = c.get_storage_broker()
    r = []
    def add(name, value):
        # Every finding becomes one <li> "name: value" entry.
        r.append(T.li[name + ": ", value])
    add("Report", T.pre["\n".join(self._html(cr.get_report()))])
    add(
        "Share Counts", "need %d-of-%d, have %d" %
        (cr.get_encoding_needed(), cr.get_encoding_expected(),
         cr.get_share_counter_good()))
    add("Hosts with good shares", cr.get_host_counter_good_shares())
    if cr.get_corrupt_shares():
        badsharemap = []
        for (s, si, shnum) in cr.get_corrupt_shares():
            d = T.tr[T.td["sh#%d" % shnum],
                     T.td[T.div(class_="nickname")[s.get_nickname()],
                          T.div(class_="nodeid")[T.tt[s.get_name()]]],
                     ]
            badsharemap.append(d)
        add(
            "Corrupt shares",
            T.table()[T.tr[T.th["Share ID"],
                           T.th(class_="nickname-and-peerid")[
                               T.div["Nickname"],
                               T.div(class_="nodeid")["Node ID"]]],
                      badsharemap])
    else:
        add("Corrupt shares", "none")
    add("Wrong Shares", cr.get_share_counter_wrong())
    sharemap_data = []
    shares_on_server = dictutil.DictOfSets()
    # FIXME: The two tables below contain nickname-and-nodeid table column
    # markup which is duplicated with each other, introducer.xhtml, and
    # deep-check-results.xhtml. All of these (and any other presentations
    # of nickname-and-nodeid) should be combined.
    for shareid in sorted(cr.get_sharemap().keys()):
        servers = sorted(cr.get_sharemap()[shareid],
                         key=lambda s: s.get_longname())
        for i, s in enumerate(servers):
            shares_on_server.add(s, shareid)
            shareid_s = ""
            if i == 0:
                # Only the first server row of a share group shows the id.
                shareid_s = shareid
            d = T.tr[T.td[shareid_s],
                     T.td[T.div(class_="nickname")[s.get_nickname()],
                          T.div(class_="nodeid")[T.tt[s.get_name()]]]]
            sharemap_data.append(d)
    add(
        "Good Shares (sorted in share order)",
        T.table()[T.tr[T.th["Share ID"],
                       T.th(class_="nickname-and-peerid")[
                           T.div["Nickname"],
                           T.div(class_="nodeid")["Node ID"]]],
                  sharemap_data])
    add("Recoverable Versions", cr.get_version_counter_recoverable())
    add("Unrecoverable Versions", cr.get_version_counter_unrecoverable())
    # this table is sorted by permuted order
    permuted_servers = [
        s for s in sb.get_servers_for_psi(cr.get_storage_index())
    ]
    num_shares_left = sum(
        [len(shareids) for shareids in shares_on_server.values()])
    servermap = []
    for s in permuted_servers:
        shareids = list(shares_on_server.get(s, []))
        shareids.reverse()
        shareids_s = [T.tt[shareid, " "] for shareid in sorted(shareids)]
        d = T.tr[T.td[T.div(class_="nickname")[s.get_nickname()],
                      T.div(class_="nodeid")[T.tt[s.get_name()]]],
                 T.td[shareids_s],
                 ]
        servermap.append(d)
        num_shares_left -= len(shareids)
        if not num_shares_left:
            # Stop once every known share has been accounted for.
            break
    add(
        "Share Balancing (servers in permuted order)",
        T.table()[T.tr[T.th(
            class_="nickname-and-peerid")[T.div["Nickname"], T.div(
                class_="nodeid")["Node ID"]], T.th["Share IDs"]],
                  servermap])
    return T.ul[r]
def _render_results(self, ctx, cr):
    """
    Render an ICheckResults object (via its get_data() dictionary) as a
    <ul>: check report, share counts, corrupt shares, the good-share map,
    version counters, and server share balancing in permuted order.
    """
    assert ICheckResults(cr)
    c = self.client
    sb = c.get_storage_broker()
    data = cr.get_data()
    r = []
    def add(name, value):
        # Every finding becomes one <li> "name: value" entry.
        r.append(T.li[name + ": ", value])
    add("Report", T.pre["\n".join(self._html(cr.get_report()))])
    add("Share Counts",
        "need %d-of-%d, have %d" % (data["count-shares-needed"],
                                    data["count-shares-expected"],
                                    data["count-shares-good"]))
    add("Hosts with good shares", data["count-good-share-hosts"])
    if data["list-corrupt-shares"]:
        badsharemap = []
        for (serverid, si, shnum) in data["list-corrupt-shares"]:
            nickname = sb.get_nickname_for_serverid(serverid)
            badsharemap.append(T.tr[T.td["sh#%d" % shnum],
                                    T.td[T.div(class_="nickname")[nickname],
                                         T.div(class_="nodeid")[T.tt[base32.b2a(serverid)]]],
                                    ])
        add("Corrupt shares", T.table()[
            T.tr[T.th["Share ID"],
                 T.th(class_="nickname-and-peerid")[T.div["Nickname"],
                                                    T.div(class_="nodeid")["Node ID"]]],
            badsharemap])
    else:
        add("Corrupt shares", "none")
    add("Wrong Shares", data["count-wrong-shares"])
    sharemap = []
    servers = {}
    # FIXME: The two tables below contain nickname-and-nodeid table column
    # markup which is duplicated with each other, introducer.xhtml, and
    # deep-check-results.xhtml. All of these (and any other presentations
    # of nickname-and-nodeid) should be combined.
    for shareid in sorted(data["sharemap"].keys()):
        serverids = data["sharemap"][shareid]
        for i,serverid in enumerate(serverids):
            # Record which shares each server holds, for the tables below.
            if serverid not in servers:
                servers[serverid] = []
            servers[serverid].append(shareid)
            shareid_s = ""
            if i == 0:
                # Only the first server row of a share group shows the id.
                shareid_s = shareid
            nickname = sb.get_nickname_for_serverid(serverid)
            sharemap.append(T.tr[T.td[shareid_s],
                                 T.td[T.div(class_="nickname")[nickname],
                                      T.div(class_="nodeid")[T.tt[base32.b2a(serverid)]]]
                                 ])
    add("Good Shares (sorted in share order)",
        T.table()[T.tr[T.th["Share ID"],
                       T.th(class_="nickname-and-peerid")[T.div["Nickname"],
                                                          T.div(class_="nodeid")["Node ID"]]],
                  sharemap])
    add("Recoverable Versions", data["count-recoverable-versions"])
    add("Unrecoverable Versions", data["count-unrecoverable-versions"])
    # this table is sorted by permuted order
    sb = c.get_storage_broker()
    permuted_servers = [s
                        for s
                        in sb.get_servers_for_psi(cr.get_storage_index())]
    num_shares_left = sum([len(shares) for shares in servers.values()])
    servermap = []
    for s in permuted_servers:
        nickname = s.get_nickname()
        shareids = servers.get(s.get_serverid(), [])
        shareids.reverse()
        shareids_s = [ T.tt[shareid, " "] for shareid in sorted(shareids) ]
        servermap.append(T.tr[T.td[T.div(class_="nickname")[nickname],
                                   T.div(class_="nodeid")[T.tt[s.get_name()]]],
                              T.td[shareids_s],
                              ])
        num_shares_left -= len(shareids)
        if not num_shares_left:
            # Stop once every known share has been accounted for.
            break
    add("Share Balancing (servers in permuted order)",
        T.table()[T.tr[T.th(class_="nickname-and-peerid")[T.div["Nickname"],
                                                          T.div(class_="nodeid")["Node ID"]],
                       T.th["Share IDs"]],
                  servermap])
    return T.ul[r]
def calendar(self, ctx, data):
    """
    Render a month calendar table with prev/next navigation links in the
    header, weekday names, the day grid, and "now" in the footer.

    ``data`` may be None (use the current month), a (year, month) tuple,
    or a date/datetime.  Navigation targets and styling come from the
    ICalendarOptions adapter.
    """
    now = datetime.datetime.now()
    self.current_date = now
    # Roughly one month; only used to step to the prev/next month.
    month_delta = datetime.timedelta(31)
    options = itaglibrary.ICalendarOptions(ctx, {})
    strftime = options.get('strftime', '%b %d, %Y @ %I:%M %p')
    width = options.get('width', 2)
    prev = options.get('prev', None)
    next = options.get('next', None)
    base = options.get('base_url', None)
    calendar_class = options.get('calendar_class', 'calendar')
    if data is None:
        d = now
        current = d.year, d.month
    elif isinstance(data, tuple):
        year, month = data
        # Day 4 is arbitrary: any day safely inside the month works.
        d = datetime.date(year, month, 4)
        current = data
    elif isinstance(data, (datetime.date, datetime.datetime)):
        d = data
        current = d.year, d.month
    if prev is None or next is None:
        # Derive (year, month) navigation targets and build their URLs.
        p = d - month_delta
        n = d + month_delta
        prev = p.year, p.month
        next = n.year, n.month
        if base is None:
            u = url.URL.fromContext(ctx)
            segments = u.pathList()
            if segments[-1] == '':
                u = u.up()
                segments = segments[:-1]
            # Strip trailing .../year/month segments before re-adding them.
            if segments[-1].isdigit() and segments[-2].isdigit():
                u = u.up().up()
            prev_url = u
            next_url = u
        else:
            prev_url = base
            next_url = base
        add_query_params = False
        def buildUrl(u, el):
            # After a '?' marker, elements are (name, value) query pairs;
            # before it they are plain path segments.
            if add_query_params:
                param_name, param_value = el
                u = u.add(param_name, str(param_value))
            else:
                u = u.child(str(el))
            return u
        for el in prev:
            if el == '?':
                add_query_params = True
                continue
            prev_url = buildUrl(prev_url, el)
        add_query_params = False
        for el in next:
            if el == '?':
                add_query_params = True
                continue
            next_url = buildUrl(next_url, el)
    else:
        # Caller supplied ready-made URL objects; use them directly.
        if isinstance(prev, (url.URL, url.URLOverlay)) and \
           isinstance(next, (url.URL, url.URLOverlay)):
            next_url = next
            prev_url = prev
    return t.table(class_=calendar_class)[
        t.thead[
            t.tr[
                t.th(colspan="7")[
                    t.a(href=prev_url)[t.xml("←")],
                    t.xml(" "),
                    t.xml('-'.join([str(el) for el in current])),
                    t.xml(" "),
                    t.a(href=next_url)[t.xml("→")]
                ]
            ],
            [
                t.tr[[t.td[dayname]
                      for dayname in calendar.weekheader(width).split()]]
            ]
        ],
        t.tbody[
            t.invisible(data=self.days(*current), render=rend.sequence)[
                t.tr(pattern='item', render=rend.sequence)[
                    t.td(pattern='item',
                         render=self.render_calendarDay)
                ]
            ]
        ],
        t.tfoot[
            t.tr[
                t.td(colspan="7")[
                    now.strftime(strftime)
                ]
            ]
        ]
    ]
def render_content(self, ctx, data):
    """
    Render the VPN configuration page: Windows/TCS config forms, the
    tunnel listing with add/delete controls, and the VPN user/certificate
    management pane.
    """
    # Certificate key files on disk, minus the server/CA keys.
    keys = [i for i in os.listdir('/etc/openvpn/keys/') if '.key' in i]
    keys.sort()
    for key in ['vpn.key', 'ca.key']:
        try:
            keys.remove(key)
        except:
            # Best effort: it is fine if the file is not present.
            pass
    # Build a list of tunnels
    types={}
    # reprocess the configuration
    for name, conf in self.sysconf.Tunnel.items():
        if name == "ipv6":
            continue
        cnf = conf
        cnf['name'] = str(name)
        # Group tunnels by their type string.
        if types.get(str(cnf['type'])):
            types[str(cnf['type'])].append(cnf)
        else:
            types[str(cnf['type'])] = [cnf]
    # Check vpn is configured
    if os.path.exists('/etc/openvpn/vpn.conf'):
        userForm = tags.directive('form addUser')
    else:
        userForm = tags.strong["Please configure the VPN in order to add new users"]
    tuns = []
    # Call the handler functions with the stores
    ifs = Utils.getInterfaces()
    for k,v in types.items():
        if v:
            v.sort()
            for c,tun in enumerate(v):
                status = tags.a(href='Start/%s/' % tun['name'])["Disconnected"]
                if k == 'openvpn':
                    # Hunt TAP interfaces
                    # A live tapN interface means this tunnel is up.
                    if 'tap%s' % (c+1) in ifs:
                        status = tags.a(href='Stop/%s/' % tun['name'])["Connected"]
                tuns.append((
                    status,
                    tun['name'],
                    tun['type'],
                    tun['endpoint'],
                    tags.a(href="Delete/%s/" % (tun['name']))["Delete"]
                ))
    return ctx.tag[
        tags.h3[tags.img(src="/images/vpn.png"), self.text.vpnConfig],
        PageHelpers.TabSwitcher((
            (self.text.vpnTabWindows, 'panelWindows'),
            (self.text.vpnTabTCS, 'panelOpenVPN'),
            (self.text.vpnTabUsers, 'panelVPNUsers'),
            (self.text.vpnTabTun, 'panelTun')
        )),
        tags.div(id="panelWindows", _class="tabPane")[
            tags.h3[self.text.vpnHeadingWindows],
            tags.directive('form winForm'),
        ],
        tags.div(id="panelTun", _class="tabPane")[
            tags.h3["Tunnels"],
            PageHelpers.dataTable(['Status', 'Name', 'Type', 'Endpoint', ''],
                                  tuns),
            tags.h3["Add tunnel"],
            tags.directive('form addTun'),
            tags.br,
        ],
        tags.div(id="panelOpenVPN", _class="tabPane")[
            tags.h3[self.text.vpnHeadingTCS],
            tags.directive('form vpnForm'),
            tags.br,
        ],
        tags.div(id="panelVPNUsers", _class="tabPane")[
            tags.h3[self.text.vpnHeadingTCSUsers],
            # One row per user certificate, with a revoke link.
            tags.table(cellspacing=0, _class='sortable')[
                tags.thead(background="/images/gradMB.png")[
                    tags.tr[
                        tags.th(colformat="str")[self.text.vpnCertificateName],
                        tags.th[""],
                    ]
                ],
                tags.tbody[
                    [
                        tags.tr[
                            # Display the certificate name without the
                            # trailing ".key" extension.
                            tags.td['.'.join(i.split('.')[:-1])],
                            tags.td[
                                tags.a(
                                    href="Revoke/%s/" % '.'.join(i.split('.')[:-1]),
                                    onclick="return confirm('%s');" % self.text.vpnConfirmRevoke
                                )[
                                    tags.img(src="/images/ex.png")
                                ]
                            ]
                        ]
                    for i in keys],
                ]
            ],
            tags.br,
            tags.h3[self.text.vpnHeadingAddUser],
            userForm
        ],
        PageHelpers.LoadTabSwitcher()
    ]
def _render_results(self, ctx, cr):
    """
    Render an ICheckResults object (through its get_data() dict) as a
    <ul> of findings: report, share counts, corrupt shares, the share
    map, version counters, and per-server share balancing.
    """
    assert ICheckResults(cr)
    c = self.client
    sb = c.get_storage_broker()
    data = cr.get_data()
    r = []
    def add(name, value):
        # Each finding is rendered as one <li> "name: value" entry.
        r.append(T.li[name + ": ", value])
    add("Report", T.pre["\n".join(self._html(cr.get_report()))])
    add(
        "Share Counts", "need %d-of-%d, have %d" %
        (data["count-shares-needed"], data["count-shares-expected"],
         data["count-shares-good"]))
    add("Hosts with good shares", data["count-good-share-hosts"])
    if data["list-corrupt-shares"]:
        badsharemap = []
        for (serverid, si, shnum) in data["list-corrupt-shares"]:
            nickname = sb.get_nickname_for_serverid(serverid)
            badsharemap.append(T.tr[T.td["sh#%d" % shnum],
                                    T.td[
                                        T.div(class_="nickname")[nickname],
                                        T.div(class_="nodeid")[T.tt[base32.b2a(serverid)]]],
                                    ])
        add(
            "Corrupt shares",
            T.table()[T.tr[T.th["Share ID"],
                           T.th(class_="nickname-and-peerid")[
                               T.div["Nickname"],
                               T.div(class_="nodeid")["Node ID"]]],
                      badsharemap])
    else:
        add("Corrupt shares", "none")
    add("Wrong Shares", data["count-wrong-shares"])
    sharemap = []
    servers = {}
    # FIXME: The two tables below contain nickname-and-nodeid table column
    # markup which is duplicated with each other, introducer.xhtml, and
    # deep-check-results.xhtml. All of these (and any other presentations
    # of nickname-and-nodeid) should be combined.
    for shareid in sorted(data["sharemap"].keys()):
        serverids = data["sharemap"][shareid]
        for i, serverid in enumerate(serverids):
            # Track which shares each server holds for the tables below.
            if serverid not in servers:
                servers[serverid] = []
            servers[serverid].append(shareid)
            shareid_s = ""
            if i == 0:
                # Only the first server row of a share group shows the id.
                shareid_s = shareid
            nickname = sb.get_nickname_for_serverid(serverid)
            sharemap.append(T.tr[T.td[shareid_s],
                                 T.td[
                                     T.div(class_="nickname")[nickname],
                                     T.div(class_="nodeid")[T.tt[base32.b2a(serverid)]]]])
    add(
        "Good Shares (sorted in share order)",
        T.table()[T.tr[T.th["Share ID"],
                       T.th(class_="nickname-and-peerid")[
                           T.div["Nickname"],
                           T.div(class_="nodeid")["Node ID"]]],
                  sharemap])
    add("Recoverable Versions", data["count-recoverable-versions"])
    add("Unrecoverable Versions", data["count-unrecoverable-versions"])
    # this table is sorted by permuted order
    sb = c.get_storage_broker()
    permuted_servers = [
        s for s in sb.get_servers_for_psi(cr.get_storage_index())
    ]
    num_shares_left = sum([len(shares) for shares in servers.values()])
    servermap = []
    for s in permuted_servers:
        nickname = s.get_nickname()
        shareids = servers.get(s.get_serverid(), [])
        shareids.reverse()
        shareids_s = [T.tt[shareid, " "] for shareid in sorted(shareids)]
        servermap.append(
            T.tr[T.td[T.div(class_="nickname")[nickname],
                      T.div(class_="nodeid")[T.tt[s.get_name()]]],
                 T.td[shareids_s],
                 ])
        num_shares_left -= len(shareids)
        if not num_shares_left:
            # Stop once every known share has been accounted for.
            break
    add(
        "Share Balancing (servers in permuted order)",
        T.table()[T.tr[T.th(
            class_="nickname-and-peerid")[T.div["Nickname"], T.div(
                class_="nodeid")["Node ID"]], T.th["Share IDs"]],
                  servermap])
    return T.ul[r]
def testXbuild_grid(argsdict, request=None): start_time = clock() one_week = 60 * 60 * 24 * 7 one_week_ago = start_time - one_week max_results = (int(argsdict['max_results']) if ('max_results' in argsdict and argsdict['max_results'] != '') else 128) if max_results < 1: print 'Max results must be at least 1' sys.exit(1) countdown = max_results test_cases = (None if ('test_cases' not in argsdict or argsdict['test_cases'] == None or argsdict['test_cases'] == '') else (compile(argsdict['test_cases']) if ('case' in argsdict and argsdict['case']) else compile(argsdict['test_cases'], IGNORECASE))) exclude_cases = (None if ('exclude_cases' not in argsdict or argsdict['exclude_cases'] == None or argsdict['exclude_cases'] == '') else (compile(argsdict['exclude_cases']) if ('case' in argsdict and argsdict['case']) else compile(argsdict['exclude_cases'], IGNORECASE))) results_by_build = {} results = [] tests = set() build_ids = [] mongo = bvtlib.mongodb.get_autotest() branch = argsdict['branch'] if 'branch' in argsdict else 'master' builds_query = {'branch': branch} force = 'force' in argsdict and argsdict['force'] sort_columns = argsdict['sort_columns'] if 'sort_columns' in argsdict else 'alphabetic' total_fails_by_test = {} total_passes_by_test = {} day_results = {} latest_year = None latest_yday = None day_fails = 0 day_passes = 0 for build in mongo.builds.find(builds_query).sort([('tag_time', DESCENDING)]): build_id = build['_id'] build_time = (build['tag_time'] if 'tag_time' in build else (['timestamp'] if 'timestamp' in build else None)) successes_for_build = {} failures_for_build = {} results_query={'build': build_id} interesting = False for result in mongo.results.find(results_query): if 'infrastructure_problem' not in result or result['infrastructure_problem'] == False: if 'test_case' in result: test_case = result['test_case'] if (test_case != None and (test_cases == None or test_cases.search(test_case)) and (exclude_cases == None or not 
exclude_cases.search(test_case)) and (force or 'experiments.py' not in test_case)): if 'failure' in result and result['failure'] != '': result_details = result['failure'] interesting = True if test_case in failures_for_build: failures_for_build[test_case].append(result_details) else: failures_for_build[test_case] = [ result_details ] if test_case in total_fails_by_test: total_fails_by_test[test_case] += 1 else: total_fails_by_test[test_case] = 1 day_fails += 1 else: if 'end_time' in result: interesting = True if test_case in successes_for_build: successes_for_build[test_case].append(result) else: successes_for_build[test_case] = [ result ] if test_case in total_passes_by_test: total_passes_by_test[test_case] += 1 else: total_passes_by_test[test_case] = 1 day_passes += 1 if interesting: results_for_build = (build_time, successes_for_build, failures_for_build) gmt = gmtime(float(build_time)) if ((gmt.tm_year != latest_year) or (gmt.tm_yday != latest_yday)): latest_year = gmt.tm_year latest_yday = gmt.tm_yday date_text = strftime('%Y-%m-%d', gmt) day_results[date_text] = (': ' + repr(day_passes) + ' passed, ' + repr(day_fails) + ' failed') day_fails = 0 day_passes = 0 results.append(results_for_build) results_by_build[build_id] = results_for_build build_ids.append(build_id) countdown -= 1 if countdown == 0: break tests.update(failures_for_build.keys()) tests.update(successes_for_build) if countdown == 0: break # convert from set to list test_names = [ test for test in tests ] if sort_columns == 'ratio': sort_text = 'Columns are sorted by decreasing ratio of fails.' 
ratios = {} for test in test_names: passes = total_passes_by_test[test] if test in total_passes_by_test else 0 fails = total_fails_by_test[test] if test in total_fails_by_test else 0 ratios[test] = -1 if (passes == 0 and fails == 0) else fails / (passes + fails) test_names = [ name for name, count in sorted(ratios.iteritems(), key = itemgetter(1), reverse=True) ] elif sort_columns == 'frequency': sort_text = 'Columns are sorted by decreasing number of fails.' frequencies = {} for test in test_names: frequencies[test] = total_fails_by_test[test] if test in total_fails_by_test else 0 test_names = [ name for name, count in sorted(frequencies.iteritems(), key = itemgetter(1), reverse=True) ] elif sort_columns == 'alphabetic': sort_text = 'Columns are sorted alphabetically by test case description.' test_names.sort() else: sort_text = 'Columns are not sorted, as an unknown sort type "' + repr(sort_columns) + '" was specified.' column_number = 1 column_numbers = {} column_names = [th['Test case']] column_keys = [] test_labels = {} for test_name in test_names: test_label = test_name.replace(' ', '_') column_numbers[test_name] = column_number column_heading = th[a(href="#"+test_label, title=test_name) [repr(column_number)]] column_names.append(column_heading) column_keys.append(li[a(name=test_label) [a(href="http://autotest/results?reverse=1&test_case="+test_name) [test_name]]]) test_labels[test_name] = test_label column_number += 1 rows = [column_names] latest_year = None latest_yday = None column_count = 1 + len(column_names) build_number_pattern = compile('.+-([0-9]+)-.+') day_heading_style = {'colspan':column_count, 'class':'day_heading'} for build_id in build_ids: (build_time, successes, failures) = results_by_build[build_id] try: build_number_match = build_number_pattern.match(build_id) build_number_string = build_number_match.group(1) if build_number_match else build_id gmt = gmtime(float(build_time)) if ((gmt.tm_year != latest_year) or (gmt.tm_yday != 
latest_yday)): latest_year = gmt.tm_year latest_yday = gmt.tm_yday raw_date_text = strftime('%Y-%m-%d', gmt) date_text = raw_date_text if float(build_time) >= one_week_ago: date_text += strftime(' (%A)', gmt) if day_results[raw_date_text] != None: date_text += day_results[raw_date_text] rows.append([tr[th(**day_heading_style)[date_text]]]) cells = [th(title=(build_id + '\n' + asctime(gmt)))[ a(href="http://autotest/build/"+build_id)[build_number_string], br(), strftime('%H:%M:%S', gmt) ]] except TypeError: gmt = None cells = [th[a(href="http://autotest/build/"+build_id)[build_id]]] for test in test_names: success_count = len(successes[test]) if test in successes else 0 this_test_failures = failures[test] if test in failures else None fail_count = len(this_test_failures) if this_test_failures != None else 0 some_passed = success_count > 0 some_failed = fail_count > 0 no_results = not (some_passed or some_failed) if proportionate_colour: colour = white if no_results else rgb_string(fail_count, success_count, 0, intensity=0.5) else: several_failed = fail_count > 1 colour = (amber if some_passed and some_failed else (white if no_results else ((green if success_count > 1 else pale_green) if not some_failed else (red if several_failed else pale_red)))) cell_hover_text = test + ': ' + repr(success_count) + (' pass' if success_count == 1 else ' passes') if some_failed: # collect up identical error messages so we can just give a count instead of repeating them fail_detail_counts = {} for x in this_test_failures: fail_detail_counts[x] = fail_detail_counts[x] + 1 if x in fail_detail_counts else 1 details = [ repr(count) + ": " + # display commonest error messages first message for message, count in sorted(fail_detail_counts.iteritems(), key = itemgetter(1), reverse=True) if message != None] cell_hover_text = cell_hover_text + '\nFailures:\n' + ('\n'.join(details)) cell_text = [div(align='left')[repr(success_count)], div(align='right')[repr(fail_count)]] if some_passed or 
some_failed: cells.append(td(bgcolor=colour) [a(href="results?build="+build_id+"&test_case="+test, title=cell_hover_text)[cell_text]]) else: cells.append(td[' ']) rows.append([tr[cells]]) passes_row = [th['Passes']] fails_row = [th['Fails']] for test_name in test_names: pass_count = total_passes_by_test[test_name] if test_name in total_passes_by_test else 0 fail_count = total_fails_by_test[test_name] if test_name in total_fails_by_test else 0 total = pass_count + fail_count colour_string = 'white' if total == 0 else rgb_string(fail_count, pass_count, 0, intensity=0.5) passes_row.append(td(bgcolor=colour_string)[repr(pass_count)]) fails_row.append(td(bgcolor=colour_string)[repr(fail_count)]) rows.insert(1, tr[fails_row]) rows.insert(1, tr[passes_row]) column_key = [ol[column_keys]] table_grid = [table(border='true', style="border-collapse: collapse", align="center", width="96%")[rows]] title_text = 'BVT results grid for '+branch if ('test_cases' in argsdict and argsdict['test_cases'] != None and argsdict['test_cases'] != ''): title_text += ' matching "' + argsdict['test_cases'] + '"' if ('exclude_cases' in argsdict and argsdict['exclude_cases'] != None and argsdict['exclude_cases'] != ''): title_text += ' excluding "' + argsdict['exclude_cases'] + '"' if request != None: requery_form = [ table(align="center", width="96%", bgcolor="#f0f0f0")[ tr()[td()['Branch: ', stan_input(name='branch', value=branch)['']], td()['Test cases: ', stan_input(name='test_cases', value=(argsdict['test_cases'] if 'test_cases' in argsdict else ''))[''], " ", 'Excluded cases: ', stan_input(name='exclude_cases', value=(argsdict['exclude_cases'] if 'exclude_cases' in argsdict else ''))[''], " ", 'Case-significant search', stan_input(type='checkbox', name='case')['']], td()['Include malformed results:', stan_input(type='checkbox', name='force')['']]], tr()[td()['Columns sort order: ', stan_input(type='radio', name='sort_columns', value='alphabetic', ** ({'checked':1} if sort_columns == 
'alphabetic' else {}))['alphabetic'], " ", stan_input(type='radio', name='sort_columns', value='frequency', ** ({'checked':1} if sort_columns == 'frequency' else {}))['frequency'], " ", stan_input(type='radio', name='sort_columns', value='ratio', ** ({'checked':1} if sort_columns == 'ratio' else {}))['ratio']], td()['Max results:', stan_input(name='max_results', value=max_results)['']], td()[stan_input(type='submit')]]]] else: requery_form = None page_contents = [title[title_text], h1[title_text]] page_contents += [p[key_text], p[sort_text], table_grid, h2['column key'], p[sort_text], column_key] if not proportionate_colour: page_contents += [h2['cell key'], key_table] page_contents += [hr(), div(align = 'right')['produced at ', asctime()]] return str(nevow.flat.flatten(page_contents)), str(nevow.flat.flatten(requery_form))
def calendar(self, ctx, data):
    """Render a one-month calendar table with prev/next navigation links.

    `data` selects the month to show: None (current month), a
    (year, month) tuple, or a date/datetime.  Rendering options are read
    from the ICalendarOptions adapter on the context.  Returns a stan
    t.table with prev/next arrows in the header, weekday names, one cell
    per day (rendered via self.render_calendarDay), and the current
    timestamp in the footer.
    """
    now = datetime.datetime.now()
    self.current_date = now
    # 31 days is used as an approximate one-month step for prev/next.
    month_delta = datetime.timedelta(31)
    options = itaglibrary.ICalendarOptions(ctx, {})
    strftime = options.get('strftime', '%b %d, %Y @ %I:%M %p')
    width = options.get('width', 2)
    prev = options.get('prev', None)
    next = options.get('next', None)
    base = options.get('base_url', None)
    calendar_class = options.get('calendar_class', 'calendar')
    # Normalise `data` into a reference date `d` and a (year, month) pair.
    # NOTE(review): if data is none of these three kinds, d/current stay
    # unbound and the code below raises NameError -- confirm callers only
    # ever pass None, a tuple, or a date/datetime.
    if data is None:
        d = now
        current = d.year, d.month
    elif isinstance(data, tuple):
        year, month = data
        # Day 4 presumably keeps d +/- 31 days inside the adjacent
        # months -- TODO confirm.
        d = datetime.date(year, month, 4)
        current = data
    elif isinstance(data, (datetime.date, datetime.datetime)):
        d = data
        current = d.year, d.month
    if prev is None or next is None:
        # No explicit targets given: derive (year, month) pairs for the
        # neighbouring months and build the URLs ourselves.
        p = d - month_delta
        n = d + month_delta
        prev = p.year, p.month
        next = n.year, n.month
        if base is None:
            # Start from the current request URL, stripping a trailing
            # empty segment and any trailing ".../year/month" digits.
            u = url.URL.fromContext(ctx)
            segments = u.pathList()
            if segments[-1] == '':
                u = u.up()
                segments = segments[:-1]
            if segments[-1].isdigit() and segments[-2].isdigit():
                u = u.up().up()
            prev_url = u
            next_url = u
        else:
            prev_url = base
            next_url = base
        # A '?' element in the sequence switches from path-segment mode
        # to query-parameter mode; buildUrl reads the flag via closure.
        add_query_params = False

        def buildUrl(u, el):
            if add_query_params:
                param_name, param_value = el
                u = u.add(param_name, str(param_value))
            else:
                u = u.child(str(el))
            return u

        for el in prev:
            if el == '?':
                add_query_params = True
                continue
            prev_url = buildUrl(prev_url, el)
        add_query_params = False
        for el in next:
            if el == '?':
                add_query_params = True
                continue
            next_url = buildUrl(next_url, el)
    else:
        # Explicit prev/next supplied: use them directly when they are
        # already URL objects.
        if isinstance(prev, (url.URL, url.URLOverlay)) and \
           isinstance(next, (url.URL, url.URLOverlay)):
            next_url = next
            prev_url = prev
    # `calendar` below refers to the stdlib calendar module (the method
    # name does not shadow it inside the method body).
    return t.table(class_=calendar_class)[
        t.thead[
            t.tr[
                t.th(colspan="7")[
                    t.a(href=prev_url)[t.xml("←")],
                    t.xml(" "),
                    t.xml('-'.join([str(el) for el in current])),
                    t.xml(" "),
                    t.a(href=next_url)[t.xml("→")]
                ]
            ],
            [
                t.tr[[
                    t.td[dayname]
                    for dayname in calendar.weekheader(width).split()
                ]]
            ]
        ],
        t.tbody[
            t.invisible(data=self.days(*current), render=rend.sequence)[
                t.tr(pattern='item', render=rend.sequence)[
                    t.td(pattern='item', render=self.render_calendarDay)
                ]
            ]
        ],
        t.tfoot[t.tr[t.td(colspan="7")[now.strftime(strftime)]]]
    ]
def testXbuild_grid(argsdict, request=None): start_time = clock() one_week = 60 * 60 * 24 * 7 one_week_ago = start_time - one_week max_results = (int(argsdict['max_results']) if ('max_results' in argsdict and argsdict['max_results'] != '') else 128) if max_results < 1: print 'Max results must be at least 1' sys.exit(1) countdown = max_results test_cases = (None if ('test_cases' not in argsdict or argsdict['test_cases'] == None or argsdict['test_cases'] == '') else (compile(argsdict['test_cases']) if ('case' in argsdict and argsdict['case']) else compile( argsdict['test_cases'], IGNORECASE))) exclude_cases = (None if ('exclude_cases' not in argsdict or argsdict['exclude_cases'] == None or argsdict['exclude_cases'] == '') else (compile(argsdict['exclude_cases']) if ('case' in argsdict and argsdict['case']) else compile( argsdict['exclude_cases'], IGNORECASE))) results_by_build = {} results = [] tests = set() build_ids = [] mongo = src.bvtlib.mongodb.get_autotest() branch = argsdict['branch'] if 'branch' in argsdict else 'master' builds_query = {'branch': branch} force = 'force' in argsdict and argsdict['force'] sort_columns = argsdict[ 'sort_columns'] if 'sort_columns' in argsdict else 'alphabetic' total_fails_by_test = {} total_passes_by_test = {} day_results = {} latest_year = None latest_yday = None day_fails = 0 day_passes = 0 for build in mongo.builds.find(builds_query).sort([('tag_time', DESCENDING) ]): build_id = build['_id'] build_time = (build['tag_time'] if 'tag_time' in build else (['timestamp'] if 'timestamp' in build else None)) successes_for_build = {} failures_for_build = {} results_query = {'build': build_id} interesting = False for result in mongo.results.find(results_query): if 'infrastructure_problem' not in result or result[ 'infrastructure_problem'] == False: if 'test_case' in result: test_case = result['test_case'] if (test_case != None and (test_cases == None or test_cases.search(test_case)) and (exclude_cases == None or not 
exclude_cases.search(test_case)) and (force or 'experiments.py' not in test_case)): if 'failure' in result and result['failure'] != '': result_details = result['failure'] interesting = True if test_case in failures_for_build: failures_for_build[test_case].append( result_details) else: failures_for_build[test_case] = [ result_details ] if test_case in total_fails_by_test: total_fails_by_test[test_case] += 1 else: total_fails_by_test[test_case] = 1 day_fails += 1 else: if 'end_time' in result: interesting = True if test_case in successes_for_build: successes_for_build[test_case].append( result) else: successes_for_build[test_case] = [result] if test_case in total_passes_by_test: total_passes_by_test[test_case] += 1 else: total_passes_by_test[test_case] = 1 day_passes += 1 if interesting: results_for_build = (build_time, successes_for_build, failures_for_build) gmt = gmtime(float(build_time)) if ((gmt.tm_year != latest_year) or (gmt.tm_yday != latest_yday)): latest_year = gmt.tm_year latest_yday = gmt.tm_yday date_text = strftime('%Y-%m-%d', gmt) day_results[date_text] = (': ' + repr(day_passes) + ' passed, ' + repr(day_fails) + ' failed') day_fails = 0 day_passes = 0 results.append(results_for_build) results_by_build[build_id] = results_for_build build_ids.append(build_id) countdown -= 1 if countdown == 0: break tests.update(failures_for_build.keys()) tests.update(successes_for_build) if countdown == 0: break # convert from set to list test_names = [test for test in tests] if sort_columns == 'ratio': sort_text = 'Columns are sorted by decreasing ratio of fails.' 
ratios = {} for test in test_names: passes = total_passes_by_test[ test] if test in total_passes_by_test else 0 fails = total_fails_by_test[ test] if test in total_fails_by_test else 0 ratios[test] = -1 if (passes == 0 and fails == 0) else fails / (passes + fails) test_names = [ name for name, count in sorted( ratios.iteritems(), key=itemgetter(1), reverse=True) ] elif sort_columns == 'frequency': sort_text = 'Columns are sorted by decreasing number of fails.' frequencies = {} for test in test_names: frequencies[test] = total_fails_by_test[ test] if test in total_fails_by_test else 0 test_names = [ name for name, count in sorted( frequencies.iteritems(), key=itemgetter(1), reverse=True) ] elif sort_columns == 'alphabetic': sort_text = 'Columns are sorted alphabetically by test case description.' test_names.sort() else: sort_text = 'Columns are not sorted, as an unknown sort type "' + repr( sort_columns) + '" was specified.' column_number = 1 column_numbers = {} column_names = [th['Test case']] column_keys = [] test_labels = {} for test_name in test_names: test_label = test_name.replace(' ', '_') column_numbers[test_name] = column_number column_heading = th[a(href="#" + test_label, title=test_name)[repr(column_number)]] column_names.append(column_heading) column_keys.append(li[a(name=test_label)[a( href="http://autotest/results?reverse=1&test_case=" + test_name)[test_name]]]) test_labels[test_name] = test_label column_number += 1 rows = [column_names] latest_year = None latest_yday = None column_count = 1 + len(column_names) build_number_pattern = compile('.+-([0-9]+)-.+') day_heading_style = {'colspan': column_count, 'class': 'day_heading'} for build_id in build_ids: (build_time, successes, failures) = results_by_build[build_id] try: build_number_match = build_number_pattern.match(build_id) build_number_string = build_number_match.group( 1) if build_number_match else build_id gmt = gmtime(float(build_time)) if ((gmt.tm_year != latest_year) or (gmt.tm_yday != 
latest_yday)): latest_year = gmt.tm_year latest_yday = gmt.tm_yday raw_date_text = strftime('%Y-%m-%d', gmt) date_text = raw_date_text if float(build_time) >= one_week_ago: date_text += strftime(' (%A)', gmt) if day_results[raw_date_text] != None: date_text += day_results[raw_date_text] rows.append([tr[th(**day_heading_style)[date_text]]]) cells = [ th(title=(build_id + '\n' + asctime(gmt)))[a(href="http://autotest/build/" + build_id)[build_number_string], br(), strftime('%H:%M:%S', gmt)] ] except TypeError: gmt = None cells = [th[a(href="http://autotest/build/" + build_id)[build_id]]] for test in test_names: success_count = len(successes[test]) if test in successes else 0 this_test_failures = failures[test] if test in failures else None fail_count = len( this_test_failures) if this_test_failures != None else 0 some_passed = success_count > 0 some_failed = fail_count > 0 no_results = not (some_passed or some_failed) if proportionate_colour: colour = white if no_results else rgb_string( fail_count, success_count, 0, intensity=0.5) else: several_failed = fail_count > 1 colour = (amber if some_passed and some_failed else (white if no_results else ((green if success_count > 1 else pale_green ) if not some_failed else (red if several_failed else pale_red)))) cell_hover_text = test + ': ' + repr(success_count) + ( ' pass' if success_count == 1 else ' passes') if some_failed: # collect up identical error messages so we can just give a count instead of repeating them fail_detail_counts = {} for x in this_test_failures: fail_detail_counts[x] = fail_detail_counts[ x] + 1 if x in fail_detail_counts else 1 details = [ repr(count) + ": " + # display commonest error messages first message for message, count in sorted( fail_detail_counts.iteritems(), key=itemgetter(1), reverse=True) if message != None ] cell_hover_text = cell_hover_text + '\nFailures:\n' + ( '\n'.join(details)) cell_text = [ div(align='left')[repr(success_count)], div(align='right')[repr(fail_count)] ] if 
some_passed or some_failed: cells.append( td(bgcolor=colour)[a(href="results?build=" + build_id + "&test_case=" + test, title=cell_hover_text)[cell_text]]) else: cells.append(td[' ']) rows.append([tr[cells]]) passes_row = [th['Passes']] fails_row = [th['Fails']] for test_name in test_names: pass_count = total_passes_by_test[ test_name] if test_name in total_passes_by_test else 0 fail_count = total_fails_by_test[ test_name] if test_name in total_fails_by_test else 0 total = pass_count + fail_count colour_string = 'white' if total == 0 else rgb_string( fail_count, pass_count, 0, intensity=0.5) passes_row.append(td(bgcolor=colour_string)[repr(pass_count)]) fails_row.append(td(bgcolor=colour_string)[repr(fail_count)]) rows.insert(1, tr[fails_row]) rows.insert(1, tr[passes_row]) column_key = [ol[column_keys]] table_grid = [ table(border='true', style="border-collapse: collapse", align="center", width="96%")[rows] ] title_text = 'BVT results grid for ' + branch if ('test_cases' in argsdict and argsdict['test_cases'] != None and argsdict['test_cases'] != ''): title_text += ' matching "' + argsdict['test_cases'] + '"' if ('exclude_cases' in argsdict and argsdict['exclude_cases'] != None and argsdict['exclude_cases'] != ''): title_text += ' excluding "' + argsdict['exclude_cases'] + '"' if request != None: requery_form = [ table( align="center", width="96%", bgcolor="#f0f0f0" )[tr() [td()['Branch: ', stan_input(name='branch', value=branch)['']], td( )['Test cases: ', stan_input(name='test_cases', value=( argsdict['test_cases'] if 'test_cases' in argsdict else ''))[''], " ", 'Excluded cases: ', stan_input(name='exclude_cases', value=( argsdict['exclude_cases'] if 'exclude_cases' in argsdict else ''))[''], " ", 'Case-significant search', stan_input(type='checkbox', name='case')['']], td()['Include malformed results:', stan_input(type='checkbox', name='force')['']]], tr()[td()['Columns sort order: ', stan_input(type='radio', name='sort_columns', value='alphabetic', **({ 
'checked': 1 } if sort_columns == 'alphabetic' else {} ))['alphabetic'], " ", stan_input(type='radio', name='sort_columns', value='frequency', **({ 'checked': 1 } if sort_columns == 'frequency' else {} ))['frequency'], " ", stan_input(type='radio', name='sort_columns', value='ratio', **({ 'checked': 1 } if sort_columns == 'ratio' else {} ))['ratio']], td()['Max results:', stan_input(name='max_results', value=max_results)['']], td()[stan_input(type='submit')]]] ] else: requery_form = None page_contents = [title[title_text], h1[title_text]] page_contents += [ p[key_text], p[sort_text], table_grid, h2['column key'], p[sort_text], column_key ] if not proportionate_colour: page_contents += [h2['cell key'], key_table] page_contents += [hr(), div(align='right')['produced at ', asctime()]] return str(nevow.flat.flatten(page_contents)), str( nevow.flat.flatten(requery_form))
r, total, intensity), adjust(g, total, intensity), adjust(b, total, intensity)) white = "white" amber = rgb_string(.5, .5, 0, .5) red = rgb_string(1, 0, 0, .5) pale_red = rgb_string(1, 0, 0, .25) green = rgb_string(0, 1, 0, .5) pale_green = rgb_string(0, 1, 0, .25) key_text = 'Each cell show successes (on the left) then failures (on the right). The hover text for each cell shows more detail of the fails.' key_table = [ table(border='true', style="border-collapse: collapse")[[ tr[th(bgcolor=green )['Tests with several passes and no fails are shown like this.']] ], [ tr[th(bgcolor=pale_green )['Tests with one pass and no fails are shown like this.']] ], [ tr[th(bgcolor=amber )['Tests with both passes and fails are shown like this.']] ], [ tr[th(bgcolor=pale_red )['Tests with one fail and no passes are shown like this.']] ], [ tr[th(bgcolor=red )['Tests with several fails and no passes are shown like this.']] ]] ]
def _render_results(self, ctx, cr):
    """Render an ICheckResults object as a stan T.ul of labelled fields.

    Adds report text, share counts, corrupt/wrong share details, the
    good-share map (share order), version counts, and the share-balancing
    table (servers in permuted order).
    """
    assert ICheckResults(cr)
    c = self.client
    sb = c.get_storage_broker()
    r = []

    # Helper: append one "name: value" list item to the result.
    def add(name, value):
        r.append(T.li[name + ": ", value])

    add("Report", T.pre["\n".join(self._html(cr.get_report()))])
    add("Share Counts",
        "need %d-of-%d, have %d" % (cr.get_encoding_needed(),
                                    cr.get_encoding_expected(),
                                    cr.get_share_counter_good()))
    add("Happiness Level", cr.get_happiness())
    add("Hosts with good shares", cr.get_host_counter_good_shares())

    if cr.get_corrupt_shares():
        # One table row per corrupt share: share number plus the
        # nickname/node-id of the server holding it.
        badsharemap = []
        for (s, si, shnum) in cr.get_corrupt_shares():
            d = T.tr[T.td["sh#%d" % shnum],
                     T.td[T.div(class_="nickname")[s.get_nickname()],
                          T.div(class_="nodeid")[T.tt[s.get_name()]]],
                     ]
            badsharemap.append(d)
        add("Corrupt shares",
            T.table()[
                T.tr[T.th["Share ID"],
                     T.th(class_="nickname-and-peerid")[
                         T.div["Nickname"],
                         T.div(class_="nodeid")["Node ID"]]],
                badsharemap])
    else:
        add("Corrupt shares", "none")

    add("Wrong Shares", cr.get_share_counter_wrong())

    sharemap_data = []
    shares_on_server = dictutil.DictOfSets()

    # FIXME: The two tables below contain nickname-and-nodeid table
    # column markup which is duplicated with each other,
    # introducer.xhtml, and deep-check-results.xhtml. All of these (and
    # any other presentations of nickname-and-nodeid) should be
    # combined.

    # Good-share table, sorted by share id; also accumulates
    # shares_on_server for the balancing table below.
    for shareid in sorted(cr.get_sharemap().keys()):
        servers = sorted(cr.get_sharemap()[shareid],
                         key=lambda s: s.get_longname())
        for i, s in enumerate(servers):
            shares_on_server.add(s, shareid)
            # Only the first server's row shows the share id, giving a
            # grouped (rowspan-like) appearance.
            shareid_s = ""
            if i == 0:
                shareid_s = shareid
            d = T.tr[T.td[shareid_s],
                     T.td[T.div(class_="nickname")[s.get_nickname()],
                          T.div(class_="nodeid")[T.tt[s.get_name()]]]
                     ]
            sharemap_data.append(d)
    add("Good Shares (sorted in share order)",
        T.table()[T.tr[T.th["Share ID"],
                       T.th(class_="nickname-and-peerid")[
                           T.div["Nickname"],
                           T.div(class_="nodeid")["Node ID"]]],
                  sharemap_data])

    add("Recoverable Versions", cr.get_version_counter_recoverable())
    add("Unrecoverable Versions", cr.get_version_counter_unrecoverable())

    # this table is sorted by permuted order
    permuted_servers = [s
                        for s
                        in sb.get_servers_for_psi(cr.get_storage_index())]

    num_shares_left = sum([len(shareids)
                           for shareids in shares_on_server.values()])
    servermap = []
    for s in permuted_servers:
        shareids = list(shares_on_server.get(s, []))
        shareids.reverse()
        shareids_s = [T.tt[shareid, " "] for shareid in sorted(shareids)]
        d = T.tr[T.td[T.div(class_="nickname")[s.get_nickname()],
                      T.div(class_="nodeid")[T.tt[s.get_name()]]],
                 T.td[shareids_s],
                 ]
        servermap.append(d)
        # Stop once every known share has been attributed to a server.
        num_shares_left -= len(shareids)
        if not num_shares_left:
            break
    add("Share Balancing (servers in permuted order)",
        T.table()[T.tr[T.th(class_="nickname-and-peerid")[
                           T.div["Nickname"],
                           T.div(class_="nodeid")["Node ID"]],
                       T.th["Share IDs"]],
                  servermap])

    return T.ul[r]
return (base + ((float(x) / tot) * float(inten))) * 255 return '#%02x%02x%02x' % (adjust(r, total, intensity), adjust(g, total, intensity), adjust(b, total, intensity)) white = "white" amber = rgb_string(.5, .5, 0, .5) red = rgb_string(1, 0, 0, .5) pale_red = rgb_string(1, 0, 0, .25) green = rgb_string(0, 1, 0, .5) pale_green = rgb_string(0, 1, 0, .25) key_text = 'Each cell show successes (on the left) then failures (on the right). The hover text for each cell shows more detail of the fails.' key_table = [table(border='true', style="border-collapse: collapse")[ [tr[th(bgcolor=green)['Tests with several passes and no fails are shown like this.']]], [tr[th(bgcolor=pale_green)['Tests with one pass and no fails are shown like this.']]], [tr[th(bgcolor=amber)['Tests with both passes and fails are shown like this.']]], [tr[th(bgcolor=pale_red)['Tests with one fail and no passes are shown like this.']]], [tr[th(bgcolor=red)['Tests with several fails and no passes are shown like this.']]]]] proportionate_colour = True def testXbuild_grid(argsdict, request=None): start_time = clock() one_week = 60 * 60 * 24 * 7 one_week_ago = start_time - one_week max_results = (int(argsdict['max_results']) if ('max_results' in argsdict and argsdict['max_results'] != '') else 128)