def get_tree(request, args):
    """Render a repository tree (directory listing) or blob (file contents).

    args[0] is the ref name; args[1:] are the path components inside the tree.
    Returns a full HTML page via html_page().
    """
    ref_name = args[0]
    path = args[1:]
    ref = repo.refs[ref_name]
    rows = []
    tree = ref.commit.tree
    # Walk down the tree one path component at a time; ends on a Tree or Blob.
    for p in path:
        tree = tree[p]
    if isinstance(tree, git.Blob):
        # A file: show its raw contents.
        body = html.pre(tree.data_stream.read())
    else:
        rows.append(html.tr(html.th("Name"), html.th("Size"), html.th("Type")))
        if len(args) > 1:
            # Link to the parent directory: drop the last path component.
            # BUG FIX: was args[:1], which always linked back to the tree root
            # instead of one level up.
            rows.append(
                html.tr(html.td(html.a("..", href="/" + "/".join(["tree"] + args[:-1]))),
                        html.td(), html.td("[DIR]"))
            )
        for d in tree.trees:
            link = html.td(html.a(d.name + "/", href="/" + "/".join(["tree"] + args + [d.name])))
            rows.append(html.tr(link, html.td(), html.td("[DIR]")))
        for blob in tree.blobs:
            link = html.td(html.a(blob.name, href="/" + "/".join(["tree"] + args + [blob.name])))
            size = html.td(bytes_to_human(blob.size))
            rows.append(html.tr(link, size, html.td(blob.mime_type)))
        body = html.table(*rows, **{"class": "list"})
    return html_page("Tree {} /{}".format(ref_name, "/".join(path)), html.div(body))
def vaatateksti(nr):
    """Render one aligned text: Estonian and Võro sentences side by side.

    nr: text id selecting rows from both sentence tables.
    """
    h.k("<h4>Tekst nr: ")
    h.a(nr)
    h.kr("</h4>")
    # NOTE(review): nr is interpolated straight into the SQL string — injection
    # risk if it can come from user input; prefer a parameterized query if the
    # s.sqle helper supports one.
    sql = "SELECT et.lausenr AS nr, et.lause AS etlause, vro.lause AS vrolause FROM wilaused_et et, wilaused_vro vro WHERE et.nr='%s' AND vro.nr='%s' AND et.lausenr=vro.lausenr ORDER BY et.lausenr;" % (
        nr, nr)
    h.tabel_a()  # open the table
    for r in s.sqle(d.n, sql):
        h.tr_l()  # row delimiter
        h.td(r['nr'], 'd')
        h.td(r['etlause'], 'd')
        h.td(r['vrolause'], 'd')
        h.tr_l()
    h.tabel_l()  # close the table
def select_processor(div, state, method, field_name):
    """Append a radio-button chooser listing every registered plugin that
    implements `method`, with optional spec links and example-loader buttons.

    div: container element the controls are appended to (via `<<`).
    state: unused in this block — presumably carries form state; TODO confirm.
    method: attribute name a plugin must expose to be listed.
    field_name: form field name shared by the whole radio group.
    """
    for p in plugin.registry:
        if hasattr(p, method):
            desc = []
            desc.append(p.__doc__)  # the plugin docstring doubles as its label
            if hasattr(p, 'spec'):
                desc.append(h.span(' (See ', h.a('language specification', href=p.spec), ")"))
            # Keep the previously submitted choice selected on re-render.
            if cgi_args.getfirst(field_name) == p.id:
                button = h.input(type="radio", name=field_name, checked='YES', value=p.id)
            else:
                button = h.input(type="radio", name=field_name, value=p.id)
            examples = h.span()
            if getattr(p, 'examples', []):
                examples << h.br()
                examples << "Load example input: "
                # One submit button per named example; text itself is unused here.
                for (name, text) in p.examples:
                    examples << h.input(type="submit", name="load_example", value=name)
            div << h.p(button, desc, examples)
def page_link(label, offset):
    """Return an anchor to the listing page that starts at `offset`
    (offset 0 is the front page, which has no query string)."""
    target = "/" if offset == 0 else "/?start=%i" % offset
    return html.a(label, href=target)
def format_post(post):
    """Build the HTML fragment for one post: a named anchor, the linked
    title, and a bullet list of decoded fields plus a permalink."""
    post_id = post["data"]["id"]  # renamed from `id` to avoid shadowing the builtin
    title = post["data"]["title"]
    url = post["data"]["url"]

    def formatted(x):
        # x is a (name, callback) pair from the module-level `decoders` table.
        name, callback = x
        return "%s: %s" % (name, callback(post))

    # BUG FIX: on Python 3, map() returns an iterator, so `map(...) + [...]`
    # raised TypeError; materialize the list first (also fine on Python 2).
    items = list(map(formatted, decoders)) + [html.a("Permalink", href="/?id=%s" % post_id)]
    return html.div(html.a(name=post_id),
                    html.h3(html.a(html.escape(title), href=url)),
                    html.ul(*items),
                    id="post-%s" % post_id)
def show_single(post_id):
    """ Show/edit a single post. """
    p = psblog.get_post(post_id)
    # TODO: proper form handling for html module
    # Hand-assembled edit form, pre-filled with the post's current metadata.
    o=""
    o+="<form action=\"?page=save\" method=\"POST\">"
    o+="<input type=\"text\" name=\"heading\" value=\""+p["meta"]["heading"]+"\"><br>"
    o+="<input type=\"text\" name=\"category\" value=\""+p["meta"]["category"]+"\"><br>"
    o+="<input type=\"hidden\" name=\"id\" value=\""+post_id+"\"><br>"
    o+="<input type=\"hidden\" name=\"save\" value=\"save\"><br>"
    o+="<textarea style='width:100%;height:40em;' name=\"text\">"+p["text"].decode("utf8")+"</textarea><br>"
    o+="<input type=\"submit\"><br>"
    o+="</form>"
    # One block per comment, each preceded by a delete link indexed by cid.
    i = 0
    for c in p["comments"]:
        # TODO: make delete link a POSTing form
        o2=html.a("?page=del_comment&id="+post_id+"&cid="+str(i), "delete")
        o2+="<br>"
        for key,item in c.items():
            # XXX HACK
            # Comment values may need decoding; falls back to using the value
            # as-is when decode fails (presumably Python 2 unicode behavior —
            # TODO confirm which exception actually fires here).
            try:
                o2+=key+" - "+item.decode("utf8")+"<br>"
            except UnicodeEncodeError:
                o2+=key+" - "+item+"<br>"
        o += html.block(o2)
        i += 1
    # Rendered post body preview after the form and comments.
    o+=html.block(p["html"])
    print(html.render_admin(o.encode("utf8")))
def html_page(title, *content, **arguments):
    """Wrap `content` in the site chrome (head, nav bar, trailing script)
    and return it as an HTTP HTML response.

    Extra keyword arguments are forwarded to the <body> element.
    """
    page_head = html.head(
        html.link(rel="shortcut icon", href=config["favicon"]),
        html.title("{} - {}".format(title, config["title"])),
        html.style(style),
    )
    # Site-wide navigation links; Commits/Tree point at the current HEAD ref.
    nav_items = [
        html.li(html.a("⚗", href=html.absolute())),
        html.li(html.a("Reviews", href=html.absolute("reviews"))),
        html.li(html.a("Commits", href=html.absolute("commits", repo.head.ref.name))),
        html.li(html.a("Tree", href=html.absolute("tree", repo.head.ref.name))),
        html.li(html.a("Refs", href=html.absolute("refs"))),
    ]
    nav = html.nav(html.ul(*nav_items))
    body_children = (nav,) + content + (html.script(script),)
    return http.Html(html.html(page_head, html.body(*body_children, **arguments)))
def draw(self, users):
    """Render the user-administration page: a search form, an add-user form,
    and a table listing every user with a per-row delete link.

    users: sequence whose first element is the header row; each later element
    is one user record (element [1] is used as the numeric user id).
    Returns the complete HTML document.
    """
    #print 'Content-type: text/html\n\n'
    # Links are built against this machine's resolved IP on port 5000.
    ipaddr = socket.gethostbyname(socket.gethostname())
    baseref = 'http://%s:5000/' % ipaddr
    output = ''
    # Search form row.
    output += """
    <form>
    <td><input type="text" name="pattern" value="" /></td>
    <td><input type="submit" value="Search" /></td>
    </form>
    """
    # Header row taken from users[0].
    output += html.tr("<th>" + "</th><th>".join(users[0]) + "</th>")
    # Add-user form; the hidden q=add field marks the action for the handler.
    addform = """
    <form>
    <td><input type="text" name="role" value="WAITER" /></td>
    <td>Auto</td>
    <td><input type="text" name="fname" value="" /></td>
    <td><input type="text" name="lname" value="" /></td>
    <td><input type="text" name="login" value="" /></td>
    <td><input type="text" name="tel" value="+380" /></td>
    <td><input type="submit" value="Add" /></td>
    <input type="hidden" name="q" value="add">
    </form>
    """
    output += html.tr(addform)
    # One table row per user record, with a trailing 'del' link appended.
    for user in users[1:]:
        ref = baseref + 'udel?id=%d' % user[1]
        user += html.a(ref, 'del'),
        output += html.tr("<td>" + "</td><td>".join([str(x) for x in user]) + "</td>")
    return html.doc(html.table(output))
def format_post(post):
    """Build the HTML fragment for one post: a named anchor, the linked
    title, and a bullet list of decoded fields plus a permalink."""
    post_id = post["data"]["id"]  # renamed from `id` to avoid shadowing the builtin
    title = post["data"]["title"]
    url = post["data"]["url"]

    def formatted(x):
        # x is a (name, callback) pair from the module-level `decoders` table.
        name, callback = x
        return "%s: %s" % (name, callback(post))

    # BUG FIX: on Python 3, map() returns an iterator, so `map(...) + [...]`
    # raised TypeError; materialize the list first (also fine on Python 2).
    items = list(map(formatted, decoders)) + [
        html.a("Permalink", href="/?id=%s" % post_id)
    ]
    return html.div(html.a(name=post_id),
                    html.h3(html.a(html.escape(title), href=url)),
                    html.ul(*items),
                    id="post-%s" % post_id)
def get_commit(request, args):
    """Render a single-commit page: a header line plus per-file diff sections.

    args must be a one-element list holding the commit id.
    """
    [commit_id] = args
    commit = repo.commit(commit_id)
    # NOTE(review): assumes the commit has a parent — a root commit would raise
    # IndexError here; confirm whether that case can reach this view.
    parent = commit.parents[0]
    # Diff parent -> commit; the third argument requests patch text.
    diff = parent.diff(commit, None, True)
    header = html.div(html.a(commit.hexsha[0:12], href=html.absolute("commit", commit.hexsha)), " ", commit.summary)
    diffs = map(diff_to_html, diff)
    return html_page("Commit {}".format(commit.hexsha[0:12]), header, *diffs)
def vaatalauset(lnr, tnr):
    """Show a five-sentence context window (lnr-2 .. lnr+2) of text `tnr`,
    Estonian and Võro sentences side by side.

    lnr: sentence number at the center of the window.
    tnr: text id.
    """
    h.k("Töö nr: ")
    h.a(tnr)
    h.k(" Lause nr: ")
    h.a(lnr)
    h.kr(".")
    vlnr = int(lnr) - 2  # window start
    slnr = int(lnr) + 2  # window end
    # NOTE(review): values are interpolated directly into the SQL string —
    # injection risk if lnr/tnr come from user input; prefer a parameterized
    # query if the s.sqle helper supports one.
    sql = "SELECT et.lausenr AS nr, et.lause AS etlause, vro.lause AS vrolause FROM wilaused_et et, wilaused_vro vro WHERE et.nr='%s' AND vro.nr='%s' AND et.lausenr=vro.lausenr AND et.lausenr >= '%s' and et.lausenr <= '%s' ORDER BY et.lausenr;" % (
        tnr, tnr, vlnr, slnr)
    h.tabel_a()  # open the table
    for r in s.sqle(d.n, sql):
        h.tr_l()  # row delimiter
        h.td(r['nr'], 'd')
        h.td(r['etlause'], 'd')
        h.td(r['vrolause'], 'd')
        h.tr_l()
    h.tabel_l()  # close the table
def show_overview():
    """ Show an overview over the blog. """
    posts = psblog.get_all_posts()
    comments_num = sum(len(p["comments"]) for p in posts)
    # Headline counters and admin action links.
    entries = [
        html.h(2, "Overview"),
        html.p(html.a("?page=list", str(len(posts)) + " Posts")),
        html.p(html.a("#TODO", str(comments_num) + " Comments")),
        html.p(html.a("?page=add_new", "Add New Post")),
        html.p(html.a("?page=compile", "Re-Compile")),
    ]
    o = html.block("".join(entries))
    # Most recent compile log, verbatim.
    o += html.block(html.p("last compile log:" + html.pre(
        psblog.readfile(config.log_dir + "compile.log"))))
    # Blog log, newest line first.
    log_ls = psblog.readfile(config.log_dir + "psblog.log").splitlines()
    log_ls.reverse()
    o += html.block(html.p("blog log:" + html.pre("\n".join(log_ls))))
    print(html.render_admin(o))
def get_review(request, args):
    """Render the page for one open review, identified by its hexsha.

    args must be a one-element list holding the review's hexsha.
    """
    [hexsha] = args
    review = db.get("open_reviews", hexsha)
    # Action buttons: currently just a link to the raw patch.
    patch_link = html.a("patch", href=html.absolute("patch", hexsha))
    header = html.div(
        review_to_html_summary((hexsha, review)),
        html.hr(),
        html.div(patch_link),
    )
    diff = review_to_diff(review)
    diff_sections = [diff_to_html(d) for d in diff]
    # onload hooks up the inline-comment machinery for this review.
    return html_page(
        "Review {}".format(hexsha[0:12]),
        header,
        *diff_sections,
        onload="initComments('{}');".format(hexsha)
    )
def show_list():
    """ Show a list of all posts. """
    posts = psblog.get_all_posts()
    ls_data = []
    for p in posts:
        meta = p["meta"]
        # One line per post: date - linked heading - comment count.
        entry = "".join([
            meta["datetime"].strftime("%x"),
            " - ",
            html.a("?page=single&id=" + psblog.post_id(meta), meta["heading"]),
            " - ",
            str(len(p["comments"])),
            " comment(s)",
        ])
        ls_data.append(entry)
    print(html.render_admin(html.block(html.ul(ls_data))))
def gen_pager(messages, position):
    """Build the pager widget: plain text for the current page number,
    links for every other page.

    messages: the full message list (only its length is used).
    position: index of the first message shown on the current page.
    """
    # BUG FIX: use floor division — on Python 3 `/` yields floats, making the
    # range() call below raise TypeError. `//` behaves identically on Python 2.
    num_pages = (len(messages) + MESSAGES_PER_PAGE - 1) // MESSAGES_PER_PAGE
    this_page = position // MESSAGES_PER_PAGE
    result = []
    for i in range(num_pages):
        label = "%i" % (i + 1)
        if this_page == i:
            # Current page: shown as plain text, not a link.
            result.append(label)
        else:
            if i == 0:
                url = "/"
            else:
                url = "/?start=%i" % (i * MESSAGES_PER_PAGE)
            result.append(html.a(label, href=url))
    # NOTE(review): *(" ".join(result)) spreads the joined string into html.div
    # character by character and requires every entry of `result` to be a str;
    # kept as-is pending confirmation of html.div / html.a semantics.
    return html.div("Page: ", *(" ".join(result)),
                    style="float: right; width: 50%; text-align: right;")
def witekstideinfo():
    """Print summary information about the Estonian–Võro parallel corpus:
    the number of manually aligned texts and the word counts per language."""
    wilauseid = witekstidearv()  # number of aligned texts
    h.k("Eesti - võru paralleelkorpuste andmebaasis on (")
    h.a(wilauseid)
    h.k(") lausehaaval käsitsi joondatud teksti.")
    h.k(" Ühes keeles on mitu lauset koos juhul kui teises keeles on liitlause." )
    h.k(" Tekstide loetelus on toodud iga teksti esimene lause.")
    h.kr("<br>")
    h.k("Eestikeelseid sõnu on ")
    h.a(et_sõnadearv())  # Estonian word count
    h.k(", võrukeelseid sõnu on ")
    h.a(vro_sõnadearv())  # Võro word count
    h.kr(".<br>")
#!/usr/bin/env python import cgitb cgitb.enable() import cgi import shelve import html import math MAX_PAGES = 20 MESSAGES_PER_PAGE = 20 MIME_LINK = html.a("[Should I be excited about this?]", href="https://www.reddit.com/r/Solving_A858/comments/" "24vml1/mime_type/chb5k2e?context=3") form = cgi.FieldStorage() db = shelve.open("../archive.db", 'r') def reddit_user(username): return html.a("/u/" + username, href="https://www.reddit.com/u/" + username) def credits(): return "By %s and %s" % (reddit_user("fragglet"), reddit_user("kamalist")) def expander(name, inner): control = html.span("►", id="control-%s" % name, onclick='expand("%s");' % name)
def get_ref(ref):
    """Return one table row for a ref: its name plus commits/tree links."""
    commits_link = html.a("commits", href=html.absolute("commits", ref.name))
    tree_link = html.a("tree", href=html.absolute("tree", ref.name))
    cells = [html.td(ref.name), html.td(commits_link), html.td(tree_link)]
    return html.tr(*cells)
#!/usr/bin/env python import cgitb cgitb.enable() import cgi import shelve import html import math MAX_PAGES = 20 MESSAGES_PER_PAGE = 20 MIME_LINK = html.a("[Should I be excited about this?]", href="https://www.reddit.com/r/Solving_A858/comments/" "24vml1/mime_type/chb5k2e?context=3") form = cgi.FieldStorage() db = shelve.open("../archive.db", 'r') def reddit_user(username): return html.a("/u/" + username, href="https://www.reddit.com/u/" + username) def credits(): return "By %s and %s" % (reddit_user("fragglet"), reddit_user("kamalist")) def expander(name, inner): control = html.span("►",
def review_to_html_summary(r):
    """Render the summary block for one review: its linked short id followed
    by the list of commits it includes.

    r: a (hexsha, review) pair.
    """
    hexsha, review = r
    commit_items = [html.li(*commit_to_html(repo.commit(c)))
                    for c in review["includedCommits"]]
    commits = html.ul(*commit_items)
    heading = html.h1(html.a(hexsha[0:12], href=html.absolute("review", hexsha)))
    return html.div(heading, commits, **{"class": "review"})
def reddit_user(username):
    """Return an anchor linking to the given reddit user's profile page."""
    profile_url = "https://www.reddit.com/u/" + username
    return html.a("/u/" + username, href=profile_url)
def commit_to_html(commit):
    """Return the inline pieces for a commit: linked short sha, a space,
    and the one-line summary."""
    short_sha = commit.hexsha[0:12]
    link = html.a(short_sha, href=html.absolute("commit", commit.hexsha))
    return link, " ", commit.summary
def timezone_link(zone):
    """Return an anchor linking a timezone name to its Wikipedia article."""
    wiki_url = "https://en.wikipedia.org/wiki/%s" % zone
    return html.a(zone, href=wiki_url)
def make_pages(dbx, dirname):
    """Generate the statistics pages and graph images into directory `dirname`."""

    def add_stat_to_group(groups, groupname, statid):
        # Append statid to the group's list, creating the group on first use.
        try:
            groups[groupname].append(statid)
        except KeyError:
            groups[groupname] = [statid]

    def stat_min_date(stat):
        '''Return the smallest date in the data series
        stat = [ (date, value), (date, value) ...]'''
        return min(func.lmap(lambda x: x[0], stat)) if stat else None

    def stat_max_date(stat):
        '''Likewise, return the largest date.'''
        return max(func.lmap(lambda x: x[0], stat)) if stat else None

    # Prepare the output directory (best-effort: wipe and recreate).
    try:
        shutil.rmtree(dirname)
    except:
        pass
    try:
        func.makedir(dirname)
    except:
        pass
    try:
        func.makedir(dirname + "/img")
    except:
        pass

    s = func.clsMyStat(dbx, '')
    stats = s.getAllStats()
    i, statnames, statnames_index, groups = 0, {}, {}, {}

    # Build the list of all graphs to be generated:
    mixed_graphs = {}

    # Add the automatically generated list of the most active twitter users.
    best_twitters = {}
    for stat in stats:
        if re.search(r'TWITTER_(.+?)_TWEETS', stat):
            mystat = Stat(stat, get_stat_for_graph(dbx, stat))
            best_twitters[stat] = mystat.max()
    # Keep the top 7 by their maximum tweet count.
    sorted_twitters = sorted(best_twitters.items(), key=operator.itemgetter(1))[-7:]
    stat_id = 'BEST_TWITTERS'
    mixed_graphs[stat_id] = [x[0] for x in sorted_twitters]
    add_stat_to_group(groups, 'Porovnání', stat_id)

    # 1) Load graph definitions from configuration, convert to a hash table.
    for line in func.getconfig('config/graphs'):
        lineparts = func.lmap(str.strip, line.split(' '))
        mixed_graphs[lineparts[0]] = lineparts[1:]
        statnames[lineparts[0]] = lineparts[0]
        add_stat_to_group(groups, 'Porovnání', lineparts[0])

    # 2) Add automatically created combined twitter graphs:
    # TWEETS, FOLLOWERS and LIKES per account.
    for stat in stats:
        found = re.search(r'TWITTER_(.+?)_TWEETS', stat)
        if found:
            statid = "TWITTER_%s" % found.group(1)
            mixed_graphs[statid] = [
                stat,
                "TWITTER_%s_FOLLOWERS" % found.group(1),
                "TWITTER_%s_LIKES" % found.group(1)
            ]
            statnames[statid] = "Twitter %s" % found.group(1)  # default name
            statnames_index[statid] = "%s" % found.group(
                1)  # default name for the front page
            add_stat_to_group(groups, 'Twitteři', statid)

    # 3) Add all remaining statistics, skipping the twitter ones;
    # build somewhat ad-hoc default names per known prefix.
    for stat in stats:
        if not re.search(r'TWITTER_(.+)', stat):
            mixed_graphs[stat] = [stat]
            found = re.search(r'BALANCE_(.+)', stat)
            if found:
                statnames[stat] = "Zůstatek %s" % found.group(1)
                add_stat_to_group(groups, 'Finance', stat)
                continue
            found = re.search(r'PI_MEMBERS_(.+)', stat)
            if found:
                statnames[stat] = "Počet členů %s" % found.group(1)
                add_stat_to_group(groups, 'Členové', stat)
                continue
            found = re.search(r'YOUTUBE_(.+)', stat)
            if found:
                statnames[stat] = "Youtube %s" % found.group(1)
                add_stat_to_group(groups, 'Youtube', stat)
                continue
            found = re.search(r'PP_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Finanční tým', stat)
                continue
            found = re.search(r'REDMINE_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Odbory a složky strany na Redmine', stat)
                continue
            add_stat_to_group(groups, 'Ostatní', stat)

    # Load display names for statistics from configuration.
    for line in func.getconfig('config/statnames'):
        try:
            (a, b) = line.split('\t', 2)
            statnames[a] = b
        except ValueError:
            pass

    # Front page & static assets.
    mybody = ""
    for groupname in groups:
        paragraph = []
        for statid in groups[groupname]:
            # Prefer the front-page name, then the display name, then the id.
            if statid in statnames_index.keys():
                statname = statnames_index[statid]
            elif statid in statnames.keys():
                statname = statnames[statid]
            else:
                statname = statid
            paragraph.append(html.a("%s.delta.htm" % statid, statname))
        paragraph.sort()
        mybody += html.h2(groupname) + html.p(",\n".join(paragraph))
    page = func.replace_all(
        func.readfile('templates/index.htm'), {
            '%body%': mybody,
            '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(
                datetime.datetime.now())
        })
    func.writefile(page, "%s/index.htm" % dirname)
    shutil.copytree('templates/assets', "%s/assets" % dirname)

    # Create all combined graphs, skipping statistics with at most one value.
    for statid in mixed_graphs:
        # The -s command-line argument restricts the run to a single stat id.
        if arg('s') and statid != arg('s'):
            continue
        i += 1

        # Graph data: absolute series and their day-over-day deltas.
        involved_stats, involved_deltas = {}, {}
        statInstances = []
        for invstat in mixed_graphs[statid]:
            tmpstat = get_stat_for_graph(dbx, invstat)
            involved_stats[invstat] = tmpstat
            statInstances.append(Stat(invstat, involved_stats[invstat]))

            # Compute the delta series (first point gets 0).
            deltastat, lastvalue = [], None
            for entry in tmpstat:
                deltastat.append([
                    entry[0], 0 if lastvalue is None else entry[1] - lastvalue
                ])
                lastvalue = entry[1]
            involved_deltas[invstat] = deltastat

        singlestat = (len(involved_stats.values()) == 1)

        if max(func.lmap(len, involved_stats.values(
        ))) > 0:  # involved_stats must contain at least one series with >= 1 values
            print("[%s/%s]: Creating %s \r" % (i, len(mixed_graphs), statid),
                  end='\r')

            # Base and delta graph images.
            make_graph(involved_stats,
                       "%s/img/%s.png" % (dirname, statid),
                       delta=False)
            make_graph(involved_deltas,
                       "%s/img/%s.delta.png" % (dirname, statid),
                       delta=True)

            # Data-acquisition methods: last used method per series.
            method_list = ""
            for stat in involved_stats:
                try:
                    desc = involved_stats[stat][-1:][0][2]
                except IndexError:
                    desc = "Neznámá metoda"
                method_list += "%s: %s<br>" % (stat, desc)

            # HTML page.
            statname = statnames[statid] if statid in statnames.keys(
            ) else statid
            # Date range across all non-empty involved series.
            min_date = min(
                func.lmap(stat_min_date,
                          filter(lambda x: x, involved_stats.values())))
            max_date = max(
                func.lmap(stat_max_date,
                          filter(lambda x: x, involved_stats.values())))
            bottom_links = html.h2("Metody získání dat") + \
                html.p("Vypsána je vždy poslední použitá metoda, úplný seznam je v CSV souboru." + html.br()*2 + method_list) + \
                ((html.a("%s.csv" % statid, "Zdrojová data ve formátu CSV") + html.br()) if singlestat else "") + \
                html.a("index.htm", "Všechny metriky")
            try:
                min_value = str(min(map(lambda x: x.min(), statInstances)))
            except TypeError:
                min_value = '-'
            try:
                max_value = str(max(map(lambda x: x.max(), statInstances)))
            except TypeError:
                max_value = '-'
            common_replaces = {
                '%stat_name%': statname,
                '%stat_desc%': '',
                '%stat_id%': statid,
                '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(datetime.datetime.now()),
                '%bottomlinks%': bottom_links,
                '%daterange%': '%s - %s' % (min_date, max_date),
                '%max%': max_value,
                '%min%': min_value
            }
            # Absolute-values page.
            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.png" % statid,
                        '%stat_type%': "Absolutní hodnoty"
                    }))
            func.writefile(page, "%s/%s.htm" % (dirname, statid))
            # Daily-deltas page.
            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(
                    common_replaces, {
                        '%stat_image%': "img/%s.delta.png" % statid,
                        '%stat_type%': "Denní přírůstky (delta)"
                    }))
            func.writefile(page, "%s/%s.delta.htm" % (dirname, statid))

            # Create the CSV file with the source data (single-series only).
            if singlestat:
                csv_rows = [
                    "%s;%s;%s;%s;" % (statid, "{:%d.%m.%Y}".format(x[0]), x[1], x[2])
                    for x in list(involved_stats.values())[0]
                ]
                func.writefile(
                    "stat_id;date;value;method;\n" + "\n".join(csv_rows),
                    "%s/%s.csv" % (dirname, statid))