class UploadStatusPage(UploadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("upload-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.upload_status = data

    def upload_results(self):
        return defer.maybeDeferred(self.upload_status.get_results)

    def render_results(self, ctx, data):
        d = self.upload_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        started_s = render_time(data.get_started())
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes", False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            return "(unknown)"
        return size

    def render_progress_hash(self, ctx, data):
        progress = data.get_progress()[0]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_ciphertext(self, ctx, data):
        progress = data.get_progress()[1]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_encode_push(self, ctx, data):
        progress = data.get_progress()[2]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

class HelperStatus(rend.Page):
    docFactory = getxmlfile("helper.xhtml")

    def __init__(self, helper):
        rend.Page.__init__(self, helper)
        self.helper = helper

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        t = get_arg(req, "t")
        if t == "json":
            return self.render_JSON(req)
        return rend.Page.renderHTTP(self, ctx)

    def data_helper_stats(self, ctx, data):
        return self.helper.get_stats()

    def render_JSON(self, req):
        req.setHeader("content-type", "text/plain")
        if self.helper:
            stats = self.helper.get_stats()
            return simplejson.dumps(stats, indent=1) + "\n"
        return simplejson.dumps({}) + "\n"

    def render_active_uploads(self, ctx, data):
        return data["chk_upload_helper.active_uploads"]

    def render_incoming(self, ctx, data):
        return "%d bytes in %d files" % (data["chk_upload_helper.incoming_size"],
                                         data["chk_upload_helper.incoming_count"])

    def render_encoding(self, ctx, data):
        return "%d bytes in %d files" % (data["chk_upload_helper.encoding_size"],
                                         data["chk_upload_helper.encoding_count"])

    def render_upload_requests(self, ctx, data):
        return str(data["chk_upload_helper.upload_requests"])

    def render_upload_already_present(self, ctx, data):
        return str(data["chk_upload_helper.upload_already_present"])

    def render_upload_need_upload(self, ctx, data):
        return str(data["chk_upload_helper.upload_need_upload"])

    def render_upload_bytes_fetched(self, ctx, data):
        return str(data["chk_upload_helper.fetched_bytes"])

    def render_upload_bytes_encoded(self, ctx, data):
        return str(data["chk_upload_helper.encoded_bytes"])

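# --- usage sketch (illustrative, not used by the page machinery) ---
# Exercising HelperStatus.render_JSON from outside the node. The host and
# port are assumptions for illustration; use your own node's webapi address.
def _example_fetch_helper_stats(base_url="http://127.0.0.1:3456"):
    import urllib2
    import simplejson
    # "?t=json" is the dispatch handled by renderHTTP above; the keys come
    # from Helper.get_stats(), e.g. "chk_upload_helper.active_uploads"
    body = urllib2.urlopen(base_url + "/helper_status?t=json").read()
    return simplejson.loads(body)
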
class CheckAndRepairResultsRenderer(CheckerBase, rend.Page, ResultsBase):
    docFactory = getxmlfile("check-and-repair-results.xhtml")

    def __init__(self, client, results):
        self.client = client
        self.r = None
        if results:
            self.r = ICheckAndRepairResults(results)
        rend.Page.__init__(self, results)

    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = json_check_and_repair_results(self.r)
        return simplejson.dumps(data, indent=1) + "\n"

    def render_summary(self, ctx, data):
        cr = data.get_post_repair_results()
        results = []
        if cr.is_healthy():
            results.append("Healthy")
        elif cr.is_recoverable():
            results.append("Not Healthy!")
        else:
            results.append("Not Recoverable!")
        results.append(" : ")
        results.append(self._html(cr.get_summary()))
        return ctx.tag[results]

    def render_repair_results(self, ctx, data):
        if data.get_repair_attempted():
            if data.get_repair_successful():
                return ctx.tag["Repair successful"]
            else:
                return ctx.tag["Repair unsuccessful"]
        return ctx.tag["No repair necessary"]

    def render_post_repair_results(self, ctx, data):
        cr = self._render_results(ctx, data.get_post_repair_results())
        return ctx.tag[T.div["Post-Repair Checker Results:"], cr]

    def render_maybe_pre_repair_results(self, ctx, data):
        if data.get_repair_attempted():
            cr = self._render_results(ctx, data.get_pre_repair_results())
            return ctx.tag[T.div["Pre-Repair Checker Results:"], cr]
        return ""

class CheckResults(CheckerBase, rend.Page, ResultsBase):
    docFactory = getxmlfile("check-results.xhtml")

    def __init__(self, client, results):
        self.client = client
        self.r = ICheckResults(results)
        rend.Page.__init__(self, results)

    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = json_check_results(self.r)
        return simplejson.dumps(data, indent=1) + "\n"

    def render_summary(self, ctx, data):
        results = []
        if data.is_healthy():
            results.append("Healthy")
        elif data.is_recoverable():
            results.append("Not Healthy!")
        else:
            results.append("Not Recoverable!")
        results.append(" : ")
        results.append(self._html(data.get_summary()))
        return ctx.tag[results]

    def render_repair(self, ctx, data):
        if data.is_healthy():
            return ""
        # repair button disabled until we make it work correctly;
        # see #622 for details. The form below is kept (unreachable) for
        # when it does.
        return ""
        repair = T.form(action=".", method="post",
                        enctype="multipart/form-data")[
            T.fieldset[
                T.input(type="hidden", name="t", value="check"),
                T.input(type="hidden", name="repair", value="true"),
                T.input(type="submit", value="Repair"),
            ]]
        return ctx.tag[repair]

    def render_results(self, ctx, data):
        cr = self._render_results(ctx, data)
        return ctx.tag[cr]

class LiteralCheckResultsRenderer(rend.Page, ResultsBase):
    docFactory = getxmlfile("literal-check-results.xhtml")

    def __init__(self, client):
        self.client = client
        rend.Page.__init__(self, client)

    def renderHTTP(self, ctx):
        if self.want_json(ctx):
            return self.json(ctx)
        return rend.Page.renderHTTP(self, ctx)

    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = json_check_results(None)
        return simplejson.dumps(data, indent=1) + "\n"

    def render_return(self, ctx, data):
        req = inevow.IRequest(ctx)
        return_to = get_arg(req, "return_to", None)
        if return_to:
            return T.div[T.a(href=return_to)["Return to file."]]
        return ""

class ReliabilityTool(rend.Page):
    addSlash = True
    docFactory = getxmlfile("reliability.xhtml")

    DEFAULT_PARAMETERS = [
        ("drive_lifetime", "8Y", "time", "Average drive lifetime"),
        ("k", 3, "int",
         "Minimum number of shares needed to recover the file"),
        ("R", 7, "int",
         "Repair threshold: repair will not occur until fewer than R shares "
         "are left"),
        ("N", 10, "int", "Total number of shares of the file generated"),
        ("delta", "1M", "time", "Amount of time between each simulation step"),
        ("check_period", "1M", "time",
         "How often to run the checker and repair if fewer than R shares"),
        ("report_period", "3M", "time",
         "Amount of time between result rows in this report"),
        ("report_span", "5Y", "time",
         "Total amount of time covered by this report"),
    ]

    def parse_time(self, s):
        if s.endswith("M"):
            return int(s[:-1]) * MONTH
        if s.endswith("Y"):
            return int(s[:-1]) * YEAR
        return int(s)

    def format_time(self, s):
        if s % YEAR == 0:
            return "%dY" % (s / YEAR)
        if s % MONTH == 0:
            return "%dM" % (s / MONTH)
        return "%d" % s

    def get_parameters(self, ctx):
        parameters = {}
        for (name, default, argtype, description) in self.DEFAULT_PARAMETERS:
            v = get_arg(ctx, name, default)
            if argtype == "time":
                value = self.parse_time(v)
            else:
                value = int(v)
            parameters[name] = value
        return parameters

    def renderHTTP(self, ctx):
        self.parameters = self.get_parameters(ctx)
        self.results = reliability.ReliabilityModel.run(**self.parameters)
        return rend.Page.renderHTTP(self, ctx)

    def make_input(self, name, old_value):
        return T.input(name=name, type="text", size="5",
                       value=self.format_time(old_value))

    def render_forms(self, ctx, data):
        f = T.form(action=".", method="get")
        table = []
        for (name, default_value, argtype, description) in self.DEFAULT_PARAMETERS:
            old_value = self.parameters[name]
            i = self.make_input(name, old_value)
            table.append(T.tr[T.td[name + ":"], T.td[i], T.td[description]])
        go = T.input(type="submit", value="Recompute")
        return [T.h2["Simulation Parameters:"],
                f[T.table[table], go],
                ]

    def data_simulation_table(self, ctx, data):
        for row in self.results.samples:
            yield row

    def render_simulation_row(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = row
        ctx.fillSlots("t", yandm(when))
        ctx.fillSlots("P_repair", "%.6f" % P_repaired_last_check_period)
        ctx.fillSlots("P_dead_unmaintained", "%.6g" % P_dead_unmaintained)
        ctx.fillSlots("P_dead_maintained", "%.6g" % P_dead_maintained)
        return ctx.tag

    def render_report_span(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1]
        return ctx.tag[yandm(when)]

    def render_P_loss_unmaintained(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1]
        return ctx.tag["%.6g (%1.8f%%)" % (P_dead_unmaintained,
                                           100 * P_dead_unmaintained)]

    def render_P_loss_maintained(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1]
        return ctx.tag["%.6g (%1.8f%%)" % (P_dead_maintained,
                                           100 * P_dead_maintained)]

    def render_P_repair_rate(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1]
        freq = when / cumulative_number_of_repairs
        return ctx.tag["%.6g" % freq]

    def render_P_repair_shares(self, ctx, row):
        (when, unmaintained_shareprobs, maintained_shareprobs,
         P_repaired_last_check_period,
         cumulative_number_of_repairs,
         cumulative_number_of_new_shares,
         P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1]
        generated_shares = cumulative_number_of_new_shares / cumulative_number_of_repairs
        return ctx.tag["%1.2f" % generated_shares]

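# --- usage sketch (illustrative) ---
# Round-tripping the "time" parameters through parse_time/format_time,
# assuming the module-level MONTH and YEAR constants are second counts
# with 0 < MONTH < YEAR (as the parser implies):
#
#   tool = ReliabilityTool()
#   tool.parse_time("8Y") == 8 * YEAR
#   tool.parse_time("1M") == MONTH
#   tool.parse_time("90") == 90          # bare integers are seconds
#   tool.format_time(2 * YEAR) == "2Y"
#   tool.format_time(MONTH) == "1M"
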
class PublishStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("publish-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.publish_status = data

    def render_started(self, ctx, data):
        started_s = render_time(data.get_started())
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes", False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_sharemap(self, ctx, data):
        servermap = data.get_servermap()
        if servermap is None:
            return ctx.tag["None"]
        l = T.ul()
        sharemap = servermap.make_sharemap()
        for shnum in sorted(sharemap.keys()):
            l[T.li["%d -> Placed on " % shnum,
                   ", ".join(["[%s]" % server.get_name()
                              for server in sharemap[shnum]])]]
        return ctx.tag["Sharemap:", l]

    def render_problems(self, ctx, data):
        problems = data.get_problems()
        if not problems:
            return ""
        l = T.ul()
        # XXX: is this exercised? I don't think PublishStatus.problems is
        # ever populated
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        file_size = self.publish_status.get_size()
        duration = self.publish_status.timings.get(name)
        return compute_rate(file_size, duration)

    def data_time_total(self, ctx, data):
        return self.publish_status.timings.get("total")

    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_setup(self, ctx, data):
        return self.publish_status.timings.get("setup")

    def data_time_encrypt(self, ctx, data):
        return self.publish_status.timings.get("encrypt")

    def data_rate_encrypt(self, ctx, data):
        return self._get_rate(data, "encrypt")

    def data_time_encode(self, ctx, data):
        return self.publish_status.timings.get("encode")

    def data_rate_encode(self, ctx, data):
        return self._get_rate(data, "encode")

    def data_time_pack(self, ctx, data):
        return self.publish_status.timings.get("pack")

    def data_rate_pack(self, ctx, data):
        return self._get_rate(data, "pack")

    def data_time_sign(self, ctx, data):
        return self.publish_status.timings.get("sign")

    def data_time_push(self, ctx, data):
        return self.publish_status.timings.get("push")

    def data_rate_push(self, ctx, data):
        return self._get_rate(data, "push")

    def render_server_timings(self, ctx, data):
        per_server = self.publish_status.timings.get("send_per_server")
        if not per_server:
            return ""
        l = T.ul()
        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
            times_s = ", ".join([self.render_time(None, t)
                                 for t in per_server[server]])
            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
        return T.li["Per-Server Response Times: ", l]

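# --- behavioral note (illustrative) ---
# The data_rate_* methods above all funnel through compute_rate(size,
# duration), imported from elsewhere in allmydata. A minimal stand-in with
# the guards these call sites rely on (None when either input is missing or
# the duration is zero) might look like this; the real helper may differ in
# detail:
def _example_compute_rate(file_size, duration):
    if file_size is None or duration is None or duration == 0:
        return None
    return 1.0 * file_size / duration  # bytes per second
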
class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("download-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.download_status = data

    def child_timeline(self, ctx):
        return DownloadStatusTimelinePage(self.download_status)

    def download_results(self):
        return defer.maybeDeferred(self.download_status.get_results)

    def relative_time(self, t):
        if t is None:
            return t
        if self.download_status.first_timestamp is not None:
            return t - self.download_status.first_timestamp
        return t

    def short_relative_time(self, t):
        t = self.relative_time(t)
        if t is None:
            return ""
        return "+%.6fs" % t

    def _find_overlap(self, events, start_key, end_key):
        # given a list of event dicts, return a new list in which each event
        # has an extra "row" key (an int, starting at 0), and if appropriate
        # a "serverid" key (ascii-encoded server id), replacing the "server"
        # key. This is a hint to our JS frontend about how to overlap the
        # parts of the graph it is drawing.

        # we must always make a copy, since we're going to be adding keys
        # and don't want to change the original objects. If we're
        # stringifying serverids, we'll also be changing the serverid keys.
        new_events = []
        rows = []
        for ev in events:
            ev = ev.copy()
            if "server" in ev:
                ev["serverid"] = ev["server"].get_longname()
                del ev["server"]
            # find an empty slot in the rows
            free_slot = None
            for row, finished in enumerate(rows):
                if finished is not None:
                    if ev[start_key] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev[end_key])
            else:
                rows[free_slot] = ev[end_key]
            ev["row"] = free_slot
            new_events.append(ev)
        return new_events

    def _find_overlap_requests(self, events):
        """We compute a three-element 'row tuple' for each event:
        (serverid, shnum, row). All elements are ints. The first is a
        mapping from serverid to group number, the second is a mapping
        from shnum to subgroup number. The third is a row within the
        subgroup.

        We also return a list of lists of rowcounts, so renderers can
        decide how much vertical space to give to each row.
        """
        serverid_to_group = {}
        groupnum_to_rows = {}  # maps groupnum to a table of rows. Each table
                               # is a list with an element for each row number
                               # (int starting from 0) that contains a
                               # finish_time, indicating that the row is empty
                               # beyond that time. If finish_time is None, it
                               # indicates a response that has not yet
                               # completed, so the row cannot be reused.
        new_events = []
        for ev in events:
            # DownloadStatus promises to give us events in temporal order
            ev = ev.copy()
            ev["serverid"] = ev["server"].get_longname()
            del ev["server"]
            if ev["serverid"] not in serverid_to_group:
                groupnum = len(serverid_to_group)
                serverid_to_group[ev["serverid"]] = groupnum
            groupnum = serverid_to_group[ev["serverid"]]
            if groupnum not in groupnum_to_rows:
                groupnum_to_rows[groupnum] = []
            rows = groupnum_to_rows[groupnum]
            # find an empty slot in the rows
            free_slot = None
            for row, finished in enumerate(rows):
                if finished is not None:
                    if ev["start_time"] > finished:
                        free_slot = row
                        break
            if free_slot is None:
                free_slot = len(rows)
                rows.append(ev["finish_time"])
            else:
                rows[free_slot] = ev["finish_time"]
            ev["row"] = (groupnum, free_slot)
            new_events.append(ev)
        del groupnum
        # maybe also return serverid_to_group, groupnum_to_rows, and some
        # indication of the highest finish_time
        #
        # actually, return the highest rownum for each groupnum
        highest_rownums = [len(groupnum_to_rows[groupnum])
                           for groupnum in range(len(serverid_to_group))]
        return new_events, highest_rownums

    def child_event_json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        data = {}  # this will be returned to the GET
        ds = self.download_status

        data["misc"] = self._find_overlap(ds.misc_events,
                                          "start_time", "finish_time")
        data["read"] = self._find_overlap(ds.read_events,
                                          "start_time", "finish_time")
        data["segment"] = self._find_overlap(ds.segment_events,
                                             "start_time", "finish_time")
        # TODO: overlap on DYHB isn't very useful, and usually gets in the
        # way. So don't do it.
        data["dyhb"] = self._find_overlap(ds.dyhb_requests,
                                          "start_time", "finish_time")
        data["block"], data["block_rownums"] = self._find_overlap_requests(
            ds.block_requests)

        server_info = {}  # maps longname to {num,color,short}
        server_shortnames = {}  # maps servernum to shortname
        for d_ev in ds.dyhb_requests:
            s = d_ev["server"]
            longname = s.get_longname()
            if longname not in server_info:
                num = len(server_info)
                server_info[longname] = {"num": num,
                                         "color": self.color(s),
                                         "short": s.get_name()}
                server_shortnames[str(num)] = s.get_name()

        data["server_info"] = server_info
        data["num_serverids"] = len(server_info)
        # we'd prefer the keys of serverids[] to be ints, but this is JSON,
        # so they get converted to strings. Stupid javascript.
data["serverids"] = server_shortnames data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp} return json.dumps(data, indent=1) + "\n" def render_timeline_link(self, ctx, data): from nevow import url return T.a(href=url.URL.fromContext(ctx).child("timeline"))["timeline"] def _rate_and_time(self, bytes, seconds): time_s = self.render_time(None, seconds) if seconds != 0: rate = self.render_rate(None, 1.0 * bytes / seconds) return T.span(title=rate)[time_s] return T.span[time_s] def render_events(self, ctx, data): if not self.download_status.storage_index: return srt = self.short_relative_time l = T.div() t = T.table(align="left", class_="status-download-events") t[T.tr[T.th["serverid"], T.th["sent"], T.th["received"], T.th["shnums"], T.th["RTT"]]] for d_ev in self.download_status.dyhb_requests: server = d_ev["server"] sent = d_ev["start_time"] shnums = d_ev["response_shnums"] received = d_ev["finish_time"] rtt = None if received is not None: rtt = received - sent if not shnums: shnums = ["-"] t[T.tr(style="background: %s" % self.color(server))[[ T.td[server.get_name()], T.td[srt(sent)], T.td[srt(received)], T.td[",".join([str(shnum) for shnum in shnums])], T.td[self.render_time(None, rtt)], ]]] l[T.h2["DYHB Requests:"], t] l[T.br(clear="all")] t = T.table(align="left", class_="status-download-events") t[T.tr[T.th["range"], T.th["start"], T.th["finish"], T.th["got"], T.th["time"], T.th["decrypttime"], T.th["pausedtime"], T.th["speed"]]] for r_ev in self.download_status.read_events: start = r_ev["start"] length = r_ev["length"] bytes = r_ev["bytes_returned"] decrypt_time = "" if bytes: decrypt_time = self._rate_and_time(bytes, r_ev["decrypt_time"]) speed, rtt = "", "" if r_ev["finish_time"] is not None: rtt = r_ev["finish_time"] - r_ev["start_time"] - r_ev[ "paused_time"] speed = self.render_rate(None, compute_rate(bytes, rtt)) rtt = self.render_time(None, rtt) paused = self.render_time(None, r_ev["paused_time"]) t[T.tr[T.td["[%d:+%d]" % (start, length)], T.td[srt(r_ev["start_time"])], T.td[srt(r_ev["finish_time"])], T.td[bytes], T.td[rtt], T.td[decrypt_time], T.td[paused], T.td[speed], ]] l[T.h2["Read Events:"], t] l[T.br(clear="all")] t = T.table(align="left", class_="status-download-events") t[T.tr[T.th["segnum"], T.th["start"], T.th["active"], T.th["finish"], T.th["range"], T.th["decodetime"], T.th["segtime"], T.th["speed"]]] for s_ev in self.download_status.segment_events: range_s = "-" segtime_s = "-" speed = "-" decode_time = "-" if s_ev["finish_time"] is not None: if s_ev["success"]: segtime = s_ev["finish_time"] - s_ev["active_time"] segtime_s = self.render_time(None, segtime) seglen = s_ev["segment_length"] range_s = "[%d:+%d]" % (s_ev["segment_start"], seglen) speed = self.render_rate(None, compute_rate(seglen, segtime)) decode_time = self._rate_and_time(seglen, s_ev["decode_time"]) else: # error range_s = "error" else: # not finished yet pass t[T.tr[T.td["seg%d" % s_ev["segment_number"]], T.td[srt(s_ev["start_time"])], T.td[srt(s_ev["active_time"])], T.td[srt(s_ev["finish_time"])], T.td[range_s], T.td[decode_time], T.td[segtime_s], T.td[speed]]] l[T.h2["Segment Events:"], t] l[T.br(clear="all")] t = T.table(align="left", class_="status-download-events") t[T.tr[T.th["serverid"], T.th["shnum"], T.th["range"], T.th["txtime"], T.th["rxtime"], T.th["received"], T.th["RTT"]]] for r_ev in self.download_status.block_requests: server = r_ev["server"] rtt = None if r_ev["finish_time"] is not None: rtt = r_ev["finish_time"] - r_ev["start_time"] color = self.color(server) 
t[T.tr(style="background: %s" % color)[T.td[server.get_name()], T.td[r_ev["shnum"]], T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])], T.td[srt(r_ev["start_time"])], T.td[srt(r_ev["finish_time"])], T.td[r_ev["response_length"] or ""], T.td[self.render_time(None, rtt)], ]] l[T.h2["Requests:"], t] l[T.br(clear="all")] return l def color(self, server): h = hashlib.sha256(server.get_serverid()).digest() def m(c): return min(ord(c) / 2 + 0x80, 0xff) return "#%02x%02x%02x" % (m(h[0]), m(h[1]), m(h[2])) def render_results(self, ctx, data): d = self.download_results() def _got_results(results): if results: return ctx.tag return "" d.addCallback(_got_results) return d def render_started(self, ctx, data): started_s = render_time(data.get_started()) return started_s + " (%s)" % data.get_started() def render_si(self, ctx, data): si_s = base32.b2a_or_none(data.get_storage_index()) if si_s is None: si_s = "(None)" return si_s def render_helper(self, ctx, data): return {True: "Yes", False: "No"}[data.using_helper()] def render_total_size(self, ctx, data): size = data.get_size() if size is None: return "(unknown)" return size def render_progress(self, ctx, data): progress = data.get_progress() # TODO: make an ascii-art bar return "%.1f%%" % (100.0 * progress) def render_status(self, ctx, data): return data.get_status()
class Status(MultiFormatPage):
    docFactory = getxmlfile("status.xhtml")
    addSlash = True

    def __init__(self, history):
        rend.Page.__init__(self, history)
        self.history = history

    def render_JSON(self, req):
        # modern browsers now render this instead of forcing downloads
        req.setHeader("content-type", "application/json")
        data = {}
        data["active"] = active = []
        data["recent"] = recent = []

        def _marshal_json(s):
            # common item data
            item = {
                "storage-index-string": base32.b2a_or_none(s.get_storage_index()),
                "total-size": s.get_size(),
                "status": s.get_status(),
            }
            # type-specific item data
            if IUploadStatus.providedBy(s):
                h, c, e = s.get_progress()
                item["type"] = "upload"
                item["progress-hash"] = h
                item["progress-ciphertext"] = c
                item["progress-encode-push"] = e
            elif IDownloadStatus.providedBy(s):
                item["type"] = "download"
                item["progress"] = s.get_progress()
            elif IPublishStatus.providedBy(s):
                item["type"] = "publish"
            elif IRetrieveStatus.providedBy(s):
                item["type"] = "retrieve"
            elif IServermapUpdaterStatus.providedBy(s):
                item["type"] = "mapupdate"
                item["mode"] = s.get_mode()
            else:
                item["type"] = "unknown"
                item["class"] = s.__class__.__name__
            return item

        for s in self._get_active_operations():
            active.append(_marshal_json(s))
        for s in self._get_recent_operations():
            recent.append(_marshal_json(s))
        return json.dumps(data, indent=1) + "\n"

    def _get_all_statuses(self):
        h = self.history
        return itertools.chain(h.list_all_upload_statuses(),
                               h.list_all_download_statuses(),
                               h.list_all_mapupdate_statuses(),
                               h.list_all_publish_statuses(),
                               h.list_all_retrieve_statuses(),
                               h.list_all_helper_statuses(),
                               )

    def data_active_operations(self, ctx, data):
        return self._get_active_operations()

    def _get_active_operations(self):
        active = [s for s in self._get_all_statuses() if s.get_active()]
        active.sort(lambda a, b: cmp(a.get_started(), b.get_started()))
        active.reverse()
        return active

    def data_recent_operations(self, ctx, data):
        return self._get_recent_operations()

    def _get_recent_operations(self):
        recent = [s for s in self._get_all_statuses() if not s.get_active()]
        recent.sort(lambda a, b: cmp(a.get_started(), b.get_started()))
        recent.reverse()
        return recent

    def render_row(self, ctx, data):
        s = data
        started_s = render_time(s.get_started())
        ctx.fillSlots("started", started_s)

        si_s = base32.b2a_or_none(s.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        ctx.fillSlots("si", si_s)
        ctx.fillSlots("helper", {True: "Yes", False: "No"}[s.using_helper()])

        size = s.get_size()
        if size is None:
            size = "(unknown)"
        elif isinstance(size, (int, long, float)):
            size = abbreviate_size(size)
        ctx.fillSlots("total_size", size)

        progress = data.get_progress()
        if IUploadStatus.providedBy(data):
            link = "up-%d" % data.get_counter()
            ctx.fillSlots("type", "upload")
            # TODO: make an ascii-art bar
            (chk, ciphertext, encandpush) = progress
            progress_s = ("hash: %.1f%%, ciphertext: %.1f%%, encode: %.1f%%" %
                          ((100.0 * chk), (100.0 * ciphertext),
                           (100.0 * encandpush)))
            ctx.fillSlots("progress", progress_s)
        elif IDownloadStatus.providedBy(data):
            link = "down-%d" % data.get_counter()
            ctx.fillSlots("type", "download")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IPublishStatus.providedBy(data):
            link = "publish-%d" % data.get_counter()
            ctx.fillSlots("type", "publish")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IRetrieveStatus.providedBy(data):
            ctx.fillSlots("type", "retrieve")
            link = "retrieve-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        else:
            assert IServermapUpdaterStatus.providedBy(data)
            ctx.fillSlots("type", "mapupdate %s" % data.get_mode())
            link = "mapupdate-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        ctx.fillSlots("status", T.a(href=link)[s.get_status()])
        return ctx.tag

    def childFactory(self, ctx, name):
        h = self.history
        stype, count_s = name.split("-")
        count = int(count_s)
        if stype == "up":
            for s in itertools.chain(h.list_all_upload_statuses(),
                                     h.list_all_helper_statuses()):
                # immutable-upload helpers use the same status object as a
                # regular immutable-upload
                if s.get_counter() == count:
                    return UploadStatusPage(s)
        if stype == "down":
            for s in h.list_all_download_statuses():
                if s.get_counter() == count:
                    return DownloadStatusPage(s)
        if stype == "mapupdate":
            for s in h.list_all_mapupdate_statuses():
                if s.get_counter() == count:
                    return MapupdateStatusPage(s)
        if stype == "publish":
            for s in h.list_all_publish_statuses():
                if s.get_counter() == count:
                    return PublishStatusPage(s)
        if stype == "retrieve":
            for s in h.list_all_retrieve_statuses():
                if s.get_counter() == count:
                    return RetrieveStatusPage(s)

class ManifestResults(rend.Page, ReloadMixin):
    docFactory = getxmlfile("manifest.xhtml")

    def __init__(self, client, monitor):
        self.client = client
        self.monitor = monitor

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        output = get_arg(req, "output", "html").lower()
        if output == "text":
            return self.text(req)
        if output == "json":
            return self.json(req)
        return rend.Page.renderHTTP(self, ctx)

    def slashify_path(self, path):
        if not path:
            return ""
        return "/".join([p.encode("utf-8") for p in path])

    def text(self, req):
        req.setHeader("content-type", "text/plain")
        lines = []
        is_finished = self.monitor.is_finished()
        lines.append("finished: " + {True: "yes", False: "no"}[is_finished])
        for (path, cap) in self.monitor.get_status()["manifest"]:
            lines.append(self.slashify_path(path) + " " + cap)
        return "\n".join(lines) + "\n"

    def json(self, req):
        req.setHeader("content-type", "text/plain")
        m = self.monitor
        s = m.get_status()

        if m.origin_si:
            origin_base32 = base32.b2a(m.origin_si)
        else:
            origin_base32 = ""
        status = {"stats": s["stats"],
                  "finished": m.is_finished(),
                  "origin": origin_base32,
                  }
        if m.is_finished():
            # don't return manifest/verifycaps/SIs unless the operation is
            # done, to save on CPU/memory (both here and in the HTTP client
            # who has to unpack the JSON). Tests show that the ManifestWalker
            # needs about 1092 bytes per item, the JSON we generate here
            # requires about 503 bytes per item, and some internal overhead
            # (perhaps transport-layer buffers in twisted.web?) requires an
            # additional 1047 bytes per item.
            status.update({"manifest": s["manifest"],
                           "verifycaps": [i for i in s["verifycaps"]],
                           "storage-index": [i for i in s["storage-index"]],
                           })
            # simplejson doesn't know how to serialize a set. We use a
            # generator that walks the set rather than list(setofthing) to
            # save a small amount of memory (4B*len) and a moderate amount
            # of CPU.
        return simplejson.dumps(status, indent=1)

    def _si_abbrev(self):
        si = self.monitor.origin_si
        if not si:
            return "<LIT>"
        return base32.b2a(si)[:6]

    def render_title(self, ctx):
        return T.title["Manifest of SI=%s" % self._si_abbrev()]

    def render_header(self, ctx):
        return T.p["Manifest of SI=%s" % self._si_abbrev()]

    def data_items(self, ctx, data):
        return self.monitor.get_status()["manifest"]

    def render_row(self, ctx, (path, cap)):
        ctx.fillSlots("path", self.slashify_path(path))
        root = get_root(ctx)
        # TODO: we need a clean consistent way to get the type of a cap string
        if cap:
            if cap.startswith("URI:CHK") or cap.startswith("URI:SSK"):
                nameurl = urllib.quote(path[-1].encode("utf-8"))
                uri_link = "%s/file/%s/@@named=/%s" % (root,
                                                       urllib.quote(cap),
                                                       nameurl)
            else:
                uri_link = "%s/uri/%s" % (root, urllib.quote(cap, safe=""))
            ctx.fillSlots("cap", T.a(href=uri_link)[cap])
        else:
            ctx.fillSlots("cap", "")
        return ctx.tag

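# --- usage sketch (illustrative) ---
# The output=text form above emits a "finished: yes|no" header line and
# then one "<slash/joined/path> <cap>" line per manifest entry. A minimal
# parser (a sketch: caps contain no spaces, so split from the right in case
# a path component does):
def _example_parse_manifest_text(body):
    lines = body.splitlines()
    if not lines:
        return False, []
    finished = (lines[0] == "finished: yes")
    entries = [tuple(line.rsplit(" ", 1)) for line in lines[1:] if line]
    return finished, entries  # (finished, [(path_string, cap), ...])
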
class Root(rend.Page):
    addSlash = True
    docFactory = getxmlfile("welcome.xhtml")

    _connectedalts = {
        "not-configured": "Not Configured",
        "yes": "Connected",
        "no": "Disconnected",
    }

    def __init__(self, client, clock=None, now_fn=None):
        rend.Page.__init__(self, client)
        self.client = client
        # If set, clock is a twisted.internet.task.Clock that the tests
        # use to test ophandle expiration.
        self.child_operations = operations.OphandleTable(clock)
        self.now_fn = now_fn
        try:
            s = client.getServiceNamed("storage")
        except KeyError:
            s = None
        self.child_storage = storage.StorageStatus(s, self.client.nickname)

        self.child_uri = URIHandler(client)
        self.child_cap = URIHandler(client)

        # handler for "/magic_folder" URIs
        self.child_magic_folder = magic_folder.MagicFolderWebApi(client)

        self.child_file = FileHandler(client)
        self.child_named = FileHandler(client)
        self.child_status = status.Status(client.get_history())
        self.child_statistics = status.Statistics(client.stats_provider)
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

    def child_helper_status(self, ctx):
        # the Helper isn't attached until after the Tub starts, so this
        # child needs to be created on each request
        return status.HelperStatus(self.client.helper)

    child_report_incident = IncidentReporter()
    #child_server # let's reserve this for storage-server-over-HTTP

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return render_time(time.time())

    def data_version(self, ctx, data):
        return get_package_versions_string()

    def data_import_path(self, ctx, data):
        return str(allmydata)

    def render_my_nodeid(self, ctx, data):
        tubid_s = "TubID: " + self.client.get_long_tubid()
        return T.td(title=tubid_s)[self.client.get_long_nodeid()]

    def data_my_nickname(self, ctx, data):
        return self.client.nickname

    def render_magic_folder(self, ctx, data):
        if self.client._magic_folder is None:
            return T.p()

        (ok, messages) = self.client._magic_folder.get_public_status()
        if ok:
            ctx.fillSlots("magic_folder_status", "yes")
            ctx.fillSlots("magic_folder_status_alt", "working")
        else:
            ctx.fillSlots("magic_folder_status", "no")
            ctx.fillSlots("magic_folder_status_alt", "not working")

        status = T.ul()
        for msg in messages:
            status[T.li[str(msg)]]

        return ctx.tag[status]

    def render_services(self, ctx, data):
        ul = T.ul()
        try:
            ss = self.client.getServiceNamed("storage")
            stats = ss.get_stats()
            if stats["storage_server.accepting_immutable_shares"]:
                msg = "accepting new shares"
            else:
                msg = "not accepting new shares (read-only)"
            available = stats.get("storage_server.disk_avail")
            if available is not None:
                msg += ", %s available" % abbreviate_size(available)
            ul[T.li[T.a(href="storage")["Storage Server"], ": ", msg]]
        except KeyError:
            ul[T.li["Not running storage server"]]

        if self.client.helper:
            stats = self.client.helper.get_stats()
            active_uploads = stats["chk_upload_helper.active_uploads"]
            ul[T.li["Helper: %d active uploads" % (active_uploads,)]]
        else:
            ul[T.li["Not running helper"]]

        return ctx.tag[ul]

    def data_introducer_furl_prefix(self, ctx, data):
        ifurl = self.client.introducer_furl
        if not ifurl:
            return None
        # trim off the secret swissnum
        (prefix, _, swissnum) = ifurl.rpartition("/")
        if swissnum == "introducer":
            return ifurl
        else:
            return "%s/[censored]" % (prefix,)

    def data_introducer_description(self, ctx, data):
        if self.data_connected_to_introducer(ctx, data) == "no":
            return "Introducer not connected"
        return "Introducer"

    def data_connected_to_introducer(self, ctx, data):
        if self.client.connected_to_introducer():
            return "yes"
        return "no"

    def data_connected_to_introducer_alt(self, ctx, data):
        return self._connectedalts[self.data_connected_to_introducer(ctx, data)]

    def data_helper_furl_prefix(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return None
        furl, connected = uploader.get_helper_info()
        if not furl:
            return None
        # trim off the secret swissnum
        (prefix, _, swissnum) = furl.rpartition("/")
        return "%s/[censored]" % (prefix,)

    def data_helper_description(self, ctx, data):
        if self.data_connected_to_helper(ctx, data) == "no":
            return "Helper not connected"
        return "Helper"

    def data_connected_to_helper(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return "no"  # we don't even have an Uploader
        furl, connected = uploader.get_helper_info()
        if furl is None:
            return "not-configured"
        if connected:
            return "yes"
        return "no"

    def data_connected_to_helper_alt(self, ctx, data):
        return self._connectedalts[self.data_connected_to_helper(ctx, data)]

    def data_known_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_all_serverids())

    def data_connected_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_connected_servers())

    def data_services(self, ctx, data):
        sb = self.client.get_storage_broker()
        return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

    def render_service_row(self, ctx, server):
        nodeid = server.get_serverid()

        ctx.fillSlots("peerid", server.get_longname())
        ctx.fillSlots("nickname", server.get_nickname())

        rhost = server.get_remote_host()
        if server.is_connected():
            if nodeid == self.client.nodeid:
                rhost_s = "(loopback)"
            elif isinstance(rhost, address.IPv4Address):
                rhost_s = "%s:%d" % (rhost.host, rhost.port)
            else:
                rhost_s = str(rhost)
            addr = rhost_s
            service_connection_status = "yes"
            last_connect_time = server.get_last_connect_time()
            service_connection_status_rel_time = render_time_delta(
                last_connect_time, self.now_fn())
            service_connection_status_abs_time = render_time_attr(
                last_connect_time)
        else:
            addr = "N/A"
            service_connection_status = "no"
            last_loss_time = server.get_last_loss_time()
            service_connection_status_rel_time = render_time_delta(
                last_loss_time, self.now_fn())
            service_connection_status_abs_time = render_time_attr(
                last_loss_time)

        last_received_data_time = server.get_last_received_data_time()
        last_received_data_rel_time = render_time_delta(
            last_received_data_time, self.now_fn())
        last_received_data_abs_time = render_time_attr(last_received_data_time)

        announcement = server.get_announcement()
        version = announcement["my-version"]

        available_space = server.get_available_space()
        if available_space is None:
            available_space = "N/A"
        else:
            available_space = abbreviate_size(available_space)

        ctx.fillSlots("address", addr)
        ctx.fillSlots("service_connection_status", service_connection_status)
        ctx.fillSlots("service_connection_status_alt",
                      self._connectedalts[service_connection_status])
        ctx.fillSlots("connected-bool", bool(rhost))
        ctx.fillSlots("service_connection_status_abs_time",
                      service_connection_status_abs_time)
        ctx.fillSlots("service_connection_status_rel_time",
                      service_connection_status_rel_time)
        ctx.fillSlots("last_received_data_abs_time",
                      last_received_data_abs_time)
        ctx.fillSlots("last_received_data_rel_time",
                      last_received_data_rel_time)
        ctx.fillSlots("version", version)
        ctx.fillSlots("available_space", available_space)

        return ctx.tag

    def render_download_form(self, ctx, data):
        # this is a form where users can download files by URI
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Download a file"],
                T.div["Tahoe-URI to download:" + SPACE,
                      T.input(type="text", name="uri")],
                T.div["Filename to download as:" + SPACE,
                      T.input(type="text", name="filename")],
                T.input(type="submit", value="Download!"),
            ]]
        return T.div[form]

    def render_view_form(self, ctx, data):
        # this is a form where users can download files by URI, or jump to a
        # named directory
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["View a file or directory"],
                "Tahoe-URI to view:" + SPACE,
                T.input(type="text", name="uri"), SPACE * 2,
                T.input(type="submit", value="View!"),
            ]]
        return T.div[form]

    def render_upload_form(self, ctx, data):
        # This is a form where users can upload unlinked files.
        # Users can choose immutable, SDMF, or MDMF from a radio button.
        upload_chk = T.input(type='radio', name='format',
                             value='chk', id='upload-chk',
                             checked='checked')
        upload_sdmf = T.input(type='radio', name='format',
                              value='sdmf', id='upload-sdmf')
        upload_mdmf = T.input(type='radio', name='format',
                              value='mdmf', id='upload-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Upload a file"],
                T.div["Choose a file:" + SPACE,
                      T.input(type="file", name="file",
                              class_="freeform-input-file")],
                T.input(type="hidden", name="t", value="upload"),
                T.div[upload_chk, T.label(for_="upload-chk")[" Immutable"],
                      SPACE,
                      upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"],
                      SPACE,
                      upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"],
                      SPACE * 2,
                      T.input(type="submit", value="Upload!")],
            ]]
        return T.div[form]

    def render_mkdir_form(self, ctx, data):
        # This is a form where users can create new directories.
        # Users can choose SDMF or MDMF from a radio button.
        mkdir_sdmf = T.input(type='radio', name='format',
                             value='sdmf', id='mkdir-sdmf',
                             checked='checked')
        mkdir_mdmf = T.input(type='radio', name='format',
                             value='mdmf', id='mkdir-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Create a directory"],
                mkdir_sdmf, T.label(for_='mkdir-sdmf')[" SDMF"], SPACE,
                mkdir_mdmf, T.label(for_='mkdir-mdmf')[" MDMF (experimental)"],
                SPACE * 2,
                T.input(type="hidden", name="t", value="mkdir"),
                T.input(type="hidden", name="redirect_to_result", value="true"),
                T.input(type="submit", value="Create a directory"),
            ]]
        return T.div[form]

    def render_incident_button(self, ctx, data):
        # this button triggers a foolscap-logging "incident"
        form = T.form(action="report_incident", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.input(type="hidden", name="t", value="report-incident"),
                "What went wrong?" + SPACE,
                T.input(type="text", name="details"), SPACE,
                T.input(type="submit", value=u"Save \u00BB"),
            ]]
        return T.div[form]

class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("map-update-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.update_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_finished(self, ctx, data):
        when = data.get_finished()
        if not when:
            return "not yet"
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_finished()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes", False: "No"}[data.using_helper()]

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def render_privkey_from(self, ctx, data):
        peerid = data.get_privkey_from()
        if peerid:
            return ctx.tag["Got privkey from: [%s]"
                           % idlib.shortnodeid_b2a(peerid)]
        else:
            return ""

    def data_time_total(self, ctx, data):
        return self.update_status.timings.get("total")

    def data_time_initial_queries(self, ctx, data):
        return self.update_status.timings.get("initial_queries")

    def data_time_cumulative_verify(self, ctx, data):
        return self.update_status.timings.get("cumulative_verify")

    def render_server_timings(self, ctx, data):
        per_server = self.update_status.timings.get("per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times = []
            for op, started, t in per_server[peerid]:
                #times.append("%s/%.4fs/%s/%s" % (op,
                #                                 started,
                #                                 self.render_time(None, started - self.update_status.get_started()),
                #                                 self.render_time(None, t)))
                if op == "query":
                    times.append(self.render_time(None, t))
                elif op == "late":
                    times.append("late(" + self.render_time(None, t) + ")")
                else:
                    times.append("privkey(" + self.render_time(None, t) + ")")
            times_s = ", ".join(times)
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Response Times: ", l]

    def render_timing_chart(self, ctx, data):
        imageurl = self._timing_chart()
        return ctx.tag[imageurl]

    def _timing_chart(self):
        started = self.update_status.get_started()
        total = self.update_status.timings.get("total")
        per_server = self.update_status.timings.get("per_server")
        # build a Google Charts API URL: cht=bhs is a horizontal stacked
        # bar chart, one bar per server
        base = "http://chart.apis.google.com/chart?"
        pieces = ["cht=bhs"]
        pieces.append("chco=ffffff,4d89f9,c6d9fd")  # colors
        data0 = []
        data1 = []
        data2 = []
        nb_nodes = 0
        graph_bottom_margin = 21
        graph_top_margin = 5
        peerids_s = []
        top_abs = started
        # we sort the queries by the time at which we sent the first request
        sorttable = [(times[0][1], peerid)
                     for peerid, times in per_server.items()]
        sorttable.sort()
        peerids = [t[1] for t in sorttable]
        for peerid in peerids:
            nb_nodes += 1
            times = per_server[peerid]
            peerid_s = idlib.shortnodeid_b2a(peerid)
            peerids_s.append(peerid_s)
            # for servermap updates, there are either one or two queries per
            # peer. The second (if present) is to get the privkey.
            op, q_started, q_elapsed = times[0]
            data0.append("%.3f" % (q_started - started))
            data1.append("%.3f" % q_elapsed)
            top_abs = max(top_abs, q_started + q_elapsed)
            if len(times) > 1:
                op, p_started, p_elapsed = times[1]
                data2.append("%.3f" % p_elapsed)
                top_abs = max(top_abs, p_started + p_elapsed)
            else:
                data2.append("0.0")
        finished = self.update_status.get_finished()
        if finished:
            top_abs = max(top_abs, finished)
        top_rel = top_abs - started
        chs = "chs=400x%d" % ((nb_nodes * 28) +
                              graph_top_margin + graph_bottom_margin)
        chd = "chd=t:" + "|".join([",".join(data0),
                                   ",".join(data1),
                                   ",".join(data2)])
        pieces.append(chd)
        pieces.append(chs)
        chds = "chds=0,%0.3f" % top_rel
        pieces.append(chds)
        pieces.append("chxt=x,y")
        pieces.append("chxr=0,0.0,%0.3f" % top_rel)
        pieces.append("chxl=1:|" + "|".join(reversed(peerids_s)))
        # use up to 10 grid lines, at decimal multiples.
        # mathutil.next_power_of_k doesn't handle numbers smaller than one,
        # unfortunately.
        #pieces.append("chg="
        if total is not None:
            finished_f = 1.0 * total / top_rel
            pieces.append("chm=r,FF0000,0,%0.3f,%0.3f" % (finished_f,
                                                          finished_f + 0.01))
        url = base + "&".join(pieces)
        return T.img(src=url, border="1", align="right", float="right")

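# --- output sketch (illustrative) ---
# For two servers, _timing_chart above generates a URL of this shape (all
# numbers and peerids are made up). data0 holds per-server offsets of the
# first query, data1 the query durations, data2 the privkey-fetch durations
# (0.0 when there was no second query):
#
#   http://chart.apis.google.com/chart?cht=bhs
#     &chco=ffffff,4d89f9,c6d9fd
#     &chd=t:0.000,0.013|0.145,0.211|0.0,0.098
#     &chs=400x82&chds=0,0.400&chxt=x,y
#     &chxr=0,0.0,0.400&chxl=1:|xgru5adv|e5bqoqtu
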
class Root(MultiFormatPage):
    addSlash = True
    docFactory = getxmlfile("welcome.xhtml")

    _connectedalts = {
        "not-configured": "Not Configured",
        "yes": "Connected",
        "no": "Disconnected",
    }

    def __init__(self, client, clock=None, now_fn=None):
        rend.Page.__init__(self, client)
        self.client = client
        self.now_fn = now_fn

        self.putChild("uri", URIHandler(client))
        self.putChild("cap", URIHandler(client))

        # handler for "/magic_folder" URIs
        self.putChild("magic_folder", magic_folder.MagicFolderWebApi(client))

        # Handler for everything beneath "/private", an area of the
        # resource hierarchy which is only accessible with the private
        # per-node API auth token.
        self.putChild("private", create_private_tree(client.get_auth_token))

        self.putChild("file", FileHandler(client))
        self.putChild("named", FileHandler(client))
        self.putChild("status", status.Status(client.get_history()))
        self.putChild("statistics", status.Statistics(client.stats_provider))
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

        self.putChild("report_incident", IncidentReporter())

    # until we get rid of nevow.Page in favour of twisted.web.resource
    # we can't use getChild() -- but we CAN use childFactory or
    # override locateChild
    def childFactory(self, ctx, name):
        request = IRequest(ctx)
        return self.getChild(name, request)

    def getChild(self, path, request):
        if path == "helper_status":
            # the Helper isn't attached until after the Tub starts, so this
            # child needs to be created on each request
            return status.HelperStatus(self.client.helper)
        if path == "storage":
            # Storage isn't initialized until after the web hierarchy is
            # constructed so this child needs to be created later than
            # `__init__`.
            try:
                storage_server = self.client.getServiceNamed("storage")
            except KeyError:
                storage_server = None
            return storage.StorageStatus(storage_server, self.client.nickname)

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return render_time(time.time())

    def data_version(self, ctx, data):
        return get_package_versions_string()

    def data_import_path(self, ctx, data):
        return str(allmydata)

    def render_my_nodeid(self, ctx, data):
        tubid_s = "TubID: " + self.client.get_long_tubid()
        return T.td(title=tubid_s)[self.client.get_long_nodeid()]

    def data_my_nickname(self, ctx, data):
        return self.client.nickname

    def render_JSON(self, req):
        req.setHeader("content-type", "application/json; charset=utf-8")
        intro_summaries = [s.summary for s
                           in self.client.introducer_connection_statuses()]
        sb = self.client.get_storage_broker()
        servers = self._describe_known_servers(sb)
        result = {
            "introducers": {
                "statuses": intro_summaries,
            },
            "servers": servers,
        }
        return json.dumps(result, indent=1) + "\n"

    def _describe_known_servers(self, broker):
        return sorted(list(
            self._describe_server(server)
            for server in broker.get_known_servers()))

    def _describe_server(self, server):
        status = server.get_connection_status()
        description = {
            u"nodeid": server.get_serverid(),
            u"connection_status": status.summary,
            u"available_space": server.get_available_space(),
            u"nickname": server.get_nickname(),
            u"version": None,
            u"last_received_data": status.last_received_time,
        }
        version = server.get_version()
        if version is not None:
            description[u"version"] = version["application-version"]
        return description

    def data_magic_folders(self, ctx, data):
        return self.client._magic_folders.keys()

    def render_magic_folder_row(self, ctx, data):
        magic_folder = self.client._magic_folders[data]
        (ok, messages) = magic_folder.get_public_status()
        ctx.fillSlots("magic_folder_name", data)

        if ok:
            ctx.fillSlots("magic_folder_status", "yes")
            ctx.fillSlots("magic_folder_status_alt", "working")
        else:
            ctx.fillSlots("magic_folder_status", "no")
            ctx.fillSlots("magic_folder_status_alt", "not working")

        status = T.ul(class_="magic-folder-status")
        for msg in messages:
            status[T.li[str(msg)]]

        return ctx.tag[status]

    def render_magic_folder(self, ctx, data):
        if not self.client._magic_folders:
            return T.p()
        return ctx.tag

    def render_services(self, ctx, data):
        ul = T.ul()
        try:
            ss = self.client.getServiceNamed("storage")
            stats = ss.get_stats()
            if stats["storage_server.accepting_immutable_shares"]:
                msg = "accepting new shares"
            else:
                msg = "not accepting new shares (read-only)"
            available = stats.get("storage_server.disk_avail")
            if available is not None:
                msg += ", %s available" % abbreviate_size(available)
            ul[T.li[T.a(href="storage")["Storage Server"], ": ", msg]]
        except KeyError:
            ul[T.li["Not running storage server"]]

        if self.client.helper:
            stats = self.client.helper.get_stats()
            active_uploads = stats["chk_upload_helper.active_uploads"]
            ul[T.li["Helper: %d active uploads" % (active_uploads,)]]
        else:
            ul[T.li["Not running helper"]]

        return ctx.tag[ul]

    def data_introducer_description(self, ctx, data):
        connected_count = self.data_connected_introducers(ctx, data)
        if connected_count == 0:
            return "No introducers connected"
        elif connected_count == 1:
            return "1 introducer connected"
        else:
            return "%s introducers connected" % (connected_count,)

    def data_total_introducers(self, ctx, data):
        return len(self.client.introducer_connection_statuses())

    def data_connected_introducers(self, ctx, data):
        return len([1 for cs in self.client.introducer_connection_statuses()
                    if cs.connected])

    def data_connected_to_at_least_one_introducer(self, ctx, data):
        if self.data_connected_introducers(ctx, data):
            return "yes"
        return "no"

    def data_connected_to_at_least_one_introducer_alt(self, ctx, data):
        return self._connectedalts[
            self.data_connected_to_at_least_one_introducer(ctx, data)]

    # In case we configure multiple introducers
    def data_introducers(self, ctx, data):
        return self.client.introducer_connection_statuses()

    def _render_connection_status(self, ctx, cs):
        connected = "yes" if cs.connected else "no"
        ctx.fillSlots("service_connection_status", connected)
        ctx.fillSlots("service_connection_status_alt",
                      self._connectedalts[connected])

        since = cs.last_connection_time
        ctx.fillSlots("service_connection_status_rel_time",
                      render_time_delta(since, self.now_fn())
                      if since is not None
                      else "N/A")
        ctx.fillSlots("service_connection_status_abs_time",
                      render_time_attr(since)
                      if since is not None
                      else "N/A")

        last_received_data_time = cs.last_received_time
        ctx.fillSlots("last_received_data_abs_time",
                      render_time_attr(last_received_data_time)
                      if last_received_data_time is not None
                      else "N/A")
        ctx.fillSlots("last_received_data_rel_time",
                      render_time_delta(last_received_data_time, self.now_fn())
                      if last_received_data_time is not None
                      else "N/A")

        others = cs.non_connected_statuses
        if cs.connected:
            ctx.fillSlots("summary", cs.summary)
            if others:
                details = "\n".join(["* %s: %s\n" % (which, others[which])
                                     for which in sorted(others)])
                ctx.fillSlots("details", "Other hints:\n" + details)
            else:
                ctx.fillSlots("details", "(no other hints)")
        else:
            details = T.ul()
            for which in sorted(others):
                details[T.li["%s: %s" % (which, others[which])]]
            ctx.fillSlots("summary", [cs.summary, details])
            ctx.fillSlots("details", "")

    def render_introducers_row(self, ctx, cs):
        self._render_connection_status(ctx, cs)
        return ctx.tag

    def data_helper_furl_prefix(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return None
        furl, connected = uploader.get_helper_info()
        if not furl:
            return None
        # trim off the secret swissnum
        (prefix, _, swissnum) = furl.rpartition("/")
        return "%s/[censored]" % (prefix,)

    def data_helper_description(self, ctx, data):
        if self.data_connected_to_helper(ctx, data) == "no":
            return "Helper not connected"
        return "Helper"

    def data_connected_to_helper(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return "no"  # we don't even have an Uploader
        furl, connected = uploader.get_helper_info()
        if furl is None:
            return "not-configured"
        if connected:
            return "yes"
        return "no"

    def data_connected_to_helper_alt(self, ctx, data):
        return self._connectedalts[self.data_connected_to_helper(ctx, data)]

    def data_known_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_all_serverids())

    def data_connected_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_connected_servers())

    def data_services(self, ctx, data):
        sb = self.client.get_storage_broker()
        return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

    def render_service_row(self, ctx, server):
        cs = server.get_connection_status()
        self._render_connection_status(ctx, cs)

        ctx.fillSlots("peerid", server.get_longname())
        ctx.fillSlots("nickname", server.get_nickname())

        announcement = server.get_announcement()
        version = announcement.get("my-version", "")
        available_space = server.get_available_space()
        if available_space is None:
            available_space = "N/A"
        else:
            available_space = abbreviate_size(available_space)
        ctx.fillSlots("version", version)
        ctx.fillSlots("available_space", available_space)

        return ctx.tag

    def render_download_form(self, ctx, data):
        # this is a form where users can download files by URI
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Download a file"],
                T.div["Tahoe-URI to download:" + SPACE,
                      T.input(type="text", name="uri")],
                T.div["Filename to download as:" + SPACE,
                      T.input(type="text", name="filename")],
                T.input(type="submit", value="Download!"),
            ]]
        return T.div[form]

    def render_view_form(self, ctx, data):
        # this is a form where users can download files by URI, or jump to a
        # named directory
        form = T.form(action="uri", method="get",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["View a file or directory"],
                "Tahoe-URI to view:" + SPACE,
                T.input(type="text", name="uri"), SPACE * 2,
                T.input(type="submit", value="View!"),
            ]]
        return T.div[form]

    def render_upload_form(self, ctx, data):
        # This is a form where users can upload unlinked files.
        # Users can choose immutable, SDMF, or MDMF from a radio button.
        upload_chk = T.input(type='radio', name='format',
                             value='chk', id='upload-chk',
                             checked='checked')
        upload_sdmf = T.input(type='radio', name='format',
                              value='sdmf', id='upload-sdmf')
        upload_mdmf = T.input(type='radio', name='format',
                              value='mdmf', id='upload-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Upload a file"],
                T.div["Choose a file:" + SPACE,
                      T.input(type="file", name="file",
                              class_="freeform-input-file")],
                T.input(type="hidden", name="t", value="upload"),
                T.div[upload_chk, T.label(for_="upload-chk")[" Immutable"],
                      SPACE,
                      upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"],
                      SPACE,
                      upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"],
                      SPACE * 2,
                      T.input(type="submit", value="Upload!")],
            ]]
        return T.div[form]

    def render_mkdir_form(self, ctx, data):
        # This is a form where users can create new directories.
        # Users can choose SDMF or MDMF from a radio button.
        mkdir_sdmf = T.input(type='radio', name='format',
                             value='sdmf', id='mkdir-sdmf',
                             checked='checked')
        mkdir_mdmf = T.input(type='radio', name='format',
                             value='mdmf', id='mkdir-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.legend(class_="freeform-form-label")["Create a directory"],
                mkdir_sdmf, T.label(for_='mkdir-sdmf')[" SDMF"], SPACE,
                mkdir_mdmf, T.label(for_='mkdir-mdmf')[" MDMF (experimental)"],
                SPACE * 2,
                T.input(type="hidden", name="t", value="mkdir"),
                T.input(type="hidden", name="redirect_to_result", value="true"),
                T.input(type="submit", value="Create a directory"),
            ]]
        return T.div[form]

    def render_incident_button(self, ctx, data):
        # this button triggers a foolscap-logging "incident"
        form = T.form(action="report_incident", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
                T.input(type="hidden", name="t", value="report-incident"),
                "What went wrong?" + SPACE,
                T.input(type="text", name="details"), SPACE,
                T.input(type="submit", value=u"Save \u00BB"),
            ]]
        return T.div[form]

class DeepCheckAndRepairResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
    docFactory = getxmlfile("deep-check-and-repair-results.xhtml")

    def __init__(self, client, monitor):
        self.client = client
        self.monitor = monitor

    def childFactory(self, ctx, name):
        if not name:
            return self
        # /operation/$OPHANDLE/$STORAGEINDEX provides detailed information
        # about a specific file or directory that was checked
        si = base32.a2b(name)
        s = self.monitor.get_status()
        try:
            results = s.get_results_for_storage_index(si)
            return CheckAndRepairResultsRenderer(self.client, results)
        except KeyError:
            raise WebError("No detailed results for SI %s" % html.escape(name),
                           http.NOT_FOUND)

    def renderHTTP(self, ctx):
        if self.want_json(ctx):
            return self.json(ctx)
        return rend.Page.renderHTTP(self, ctx)

    def json(self, ctx):
        inevow.IRequest(ctx).setHeader("content-type", "text/plain")
        res = self.monitor.get_status()
        data = {}
        data["finished"] = self.monitor.is_finished()
        data["root-storage-index"] = res.get_root_storage_index_string()
        c = res.get_counters()
        data["count-objects-checked"] = c["count-objects-checked"]
        data["count-objects-healthy-pre-repair"] = c[
            "count-objects-healthy-pre-repair"]
        data["count-objects-unhealthy-pre-repair"] = c[
            "count-objects-unhealthy-pre-repair"]
        data["count-objects-healthy-post-repair"] = c[
            "count-objects-healthy-post-repair"]
        data["count-objects-unhealthy-post-repair"] = c[
            "count-objects-unhealthy-post-repair"]
        data["count-repairs-attempted"] = c["count-repairs-attempted"]
        data["count-repairs-successful"] = c["count-repairs-successful"]
        data["count-repairs-unsuccessful"] = c["count-repairs-unsuccessful"]
        data["count-corrupt-shares-pre-repair"] = c[
            "count-corrupt-shares-pre-repair"]
        data["count-corrupt-shares-post-repair"] = c[
            "count-corrupt-shares-post-repair"]
        data["list-corrupt-shares"] = [
            (s.get_longname(), base32.b2a(storage_index), shnum)
            for (s, storage_index, shnum) in res.get_corrupt_shares()
        ]
        remaining_corrupt = [(s.get_longname(), base32.b2a(storage_index),
                              shnum)
                             for (s, storage_index, shnum)
                             in res.get_remaining_corrupt_shares()]
        data["list-remaining-corrupt-shares"] = remaining_corrupt
        unhealthy = [(path_t, json_check_results(crr.get_pre_repair_results()))
                     for (path_t, crr) in res.get_all_results().items()
                     if not crr.get_pre_repair_results().is_healthy()]
        data["list-unhealthy-files"] = unhealthy
        data["stats"] = res.get_stats()
        return simplejson.dumps(data, indent=1) + "\n"

    def render_root_storage_index(self, ctx, data):
        return self.monitor.get_status().get_root_storage_index_string()

    def data_objects_checked(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-objects-checked"]

    def data_objects_healthy(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-objects-healthy-pre-repair"]

    def data_objects_unhealthy(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-objects-unhealthy-pre-repair"]

    def data_corrupt_shares(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-corrupt-shares-pre-repair"]

    def data_repairs_attempted(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-repairs-attempted"]

    def data_repairs_successful(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-repairs-successful"]

    def data_repairs_unsuccessful(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-repairs-unsuccessful"]

    def data_objects_healthy_post(self, ctx, data):
        return self.monitor.get_status().get_counters()[
            "count-objects-healthy-post-repair"]
def data_objects_unhealthy_post(self, ctx, data): return self.monitor.get_status().get_counters( )["count-objects-unhealthy-post-repair"] def data_corrupt_shares_post(self, ctx, data): return self.monitor.get_status().get_counters( )["count-corrupt-shares-post-repair"] def render_pre_repair_problems_p(self, ctx, data): c = self.monitor.get_status().get_counters() if c["count-objects-unhealthy-pre-repair"]: return ctx.tag return "" def data_pre_repair_problems(self, ctx, data): all_objects = self.monitor.get_status().get_all_results() for path in sorted(all_objects.keys()): r = all_objects[path] assert ICheckAndRepairResults.providedBy(r) cr = r.get_pre_repair_results() if not cr.is_healthy(): yield path, cr def render_problem(self, ctx, data): path, cr = data return ctx.tag[self._join_pathstring(path), ": ", self._html(cr.get_summary())] def render_post_repair_problems_p(self, ctx, data): c = self.monitor.get_status().get_counters() if (c["count-objects-unhealthy-post-repair"] or c["count-corrupt-shares-post-repair"]): return ctx.tag return "" def data_post_repair_problems(self, ctx, data): all_objects = self.monitor.get_status().get_all_results() for path in sorted(all_objects.keys()): r = all_objects[path] assert ICheckAndRepairResults.providedBy(r) cr = r.get_post_repair_results() if not cr.is_healthy(): yield path, cr def render_servers_with_corrupt_shares_p(self, ctx, data): if self.monitor.get_status().get_counters( )["count-corrupt-shares-pre-repair"]: return ctx.tag return "" def data_servers_with_corrupt_shares(self, ctx, data): return [] # TODO def render_server_problem(self, ctx, data): pass def render_remaining_corrupt_shares_p(self, ctx, data): if self.monitor.get_status().get_counters( )["count-corrupt-shares-post-repair"]: return ctx.tag return "" def data_post_repair_corrupt_shares(self, ctx, data): return [] # TODO def render_share_problem(self, ctx, data): pass def render_return(self, ctx, data): req = inevow.IRequest(ctx) return_to = get_arg(req, "return_to", None) if return_to: return T.div[T.a(href=return_to)["Return to file/directory."]] return "" def data_all_objects(self, ctx, data): r = self.monitor.get_status().get_all_results() for path in sorted(r.keys()): yield (path, r[path]) def render_object(self, ctx, data): path, r = data ctx.fillSlots("path", self._join_pathstring(path)) ctx.fillSlots("healthy_pre_repair", str(r.get_pre_repair_results().is_healthy())) ctx.fillSlots("recoverable_pre_repair", str(r.get_pre_repair_results().is_recoverable())) ctx.fillSlots("healthy_post_repair", str(r.get_post_repair_results().is_healthy())) storage_index = r.get_storage_index() ctx.fillSlots("storage_index", self._render_si_link(ctx, storage_index)) ctx.fillSlots("summary", self._html(r.get_pre_repair_results().get_summary())) return ctx.tag def render_runtime(self, ctx, data): req = inevow.IRequest(ctx) runtime = time.time() - req.processing_started_timestamp return ctx.tag["runtime: %s seconds" % runtime]
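# Illustrative only: json() above reports a "finished" flag alongside the
# counters, so a client can poll the operation page until a deep-check-
# and-repair run completes. A minimal sketch, assuming op_url is the
# ophandle-based operation URL described in childFactory()'s comment, and
# using output=JSON as the check forms elsewhere in this module do.

def _example_poll_deep_check_and_repair(op_url, poll_interval=10):
    import time
    import urllib
    import simplejson
    while True:
        data = simplejson.loads(
            urllib.urlopen(op_url + "?output=JSON").read())
        if data["finished"]:
            return data  # counters, corrupt-share lists, stats
        time.sleep(poll_interval)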
class DeepCheckResultsRenderer(rend.Page, ResultsBase, ReloadMixin): docFactory = getxmlfile("deep-check-results.xhtml") def __init__(self, client, monitor): self.client = client self.monitor = monitor def childFactory(self, ctx, name): if not name: return self # /operation/$OPHANDLE/$STORAGEINDEX provides detailed information # about a specific file or directory that was checked si = base32.a2b(name) r = self.monitor.get_status() try: return CheckResultsRenderer(self.client, r.get_results_for_storage_index(si)) except KeyError: raise WebError("No detailed results for SI %s" % html.escape(name), http.NOT_FOUND) def renderHTTP(self, ctx): if self.want_json(ctx): return self.json(ctx) return rend.Page.renderHTTP(self, ctx) def json(self, ctx): inevow.IRequest(ctx).setHeader("content-type", "text/plain") data = {} data["finished"] = self.monitor.is_finished() res = self.monitor.get_status() data["root-storage-index"] = res.get_root_storage_index_string() c = res.get_counters() data["count-objects-checked"] = c["count-objects-checked"] data["count-objects-healthy"] = c["count-objects-healthy"] data["count-objects-unhealthy"] = c["count-objects-unhealthy"] data["count-corrupt-shares"] = c["count-corrupt-shares"] data["list-corrupt-shares"] = [ (s.get_longname(), base32.b2a(storage_index), shnum) for (s, storage_index, shnum) in res.get_corrupt_shares() ] data["list-unhealthy-files"] = [ (path_t, json_check_results(r)) for (path_t, r) in res.get_all_results().items() if not r.is_healthy() ] data["stats"] = res.get_stats() return simplejson.dumps(data, indent=1) + "\n" def render_root_storage_index(self, ctx, data): return self.monitor.get_status().get_root_storage_index_string() def data_objects_checked(self, ctx, data): return self.monitor.get_status().get_counters( )["count-objects-checked"] def data_objects_healthy(self, ctx, data): return self.monitor.get_status().get_counters( )["count-objects-healthy"] def data_objects_unhealthy(self, ctx, data): return self.monitor.get_status().get_counters( )["count-objects-unhealthy"] def data_objects_unrecoverable(self, ctx, data): return self.monitor.get_status().get_counters( )["count-objects-unrecoverable"] def data_count_corrupt_shares(self, ctx, data): return self.monitor.get_status().get_counters()["count-corrupt-shares"] def render_problems_p(self, ctx, data): c = self.monitor.get_status().get_counters() if c["count-objects-unhealthy"]: return ctx.tag return "" def data_problems(self, ctx, data): all_objects = self.monitor.get_status().get_all_results() for path in sorted(all_objects.keys()): cr = all_objects[path] assert ICheckResults.providedBy(cr) if not cr.is_healthy(): yield path, cr def render_problem(self, ctx, data): path, cr = data summary_text = "" summary = cr.get_summary() if summary: summary_text = ": " + summary summary_text += " [SI: %s]" % cr.get_storage_index_string() return ctx.tag[self._join_pathstring(path), self._html(summary_text)] def render_servers_with_corrupt_shares_p(self, ctx, data): if self.monitor.get_status().get_counters()["count-corrupt-shares"]: return ctx.tag return "" def data_servers_with_corrupt_shares(self, ctx, data): servers = [ s for (s, storage_index, sharenum) in self.monitor.get_status().get_corrupt_shares() ] servers.sort(key=lambda s: s.get_longname()) return servers def render_server_problem(self, ctx, server): data = [server.get_name()] nickname = server.get_nickname() if nickname: data.append(" (%s)" % self._html(nickname)) return ctx.tag[data] def render_corrupt_shares_p(self, ctx, data): if 
self.monitor.get_status().get_counters()["count-corrupt-shares"]: return ctx.tag return "" def data_corrupt_shares(self, ctx, data): return self.monitor.get_status().get_corrupt_shares() def render_share_problem(self, ctx, data): server, storage_index, sharenum = data nickname = server.get_nickname() ctx.fillSlots("serverid", server.get_name()) if nickname: ctx.fillSlots("nickname", self._html(nickname)) ctx.fillSlots("si", self._render_si_link(ctx, storage_index)) ctx.fillSlots("shnum", str(sharenum)) return ctx.tag def render_return(self, ctx, data): req = inevow.IRequest(ctx) return_to = get_arg(req, "return_to", None) if return_to: return T.div[T.a(href=return_to)["Return to file/directory."]] return "" def data_all_objects(self, ctx, data): r = self.monitor.get_status().get_all_results() for path in sorted(r.keys()): yield (path, r[path]) def render_object(self, ctx, data): path, r = data ctx.fillSlots("path", self._join_pathstring(path)) ctx.fillSlots("healthy", str(r.is_healthy())) ctx.fillSlots("recoverable", str(r.is_recoverable())) storage_index = r.get_storage_index() ctx.fillSlots("storage_index", self._render_si_link(ctx, storage_index)) ctx.fillSlots("summary", self._html(r.get_summary())) return ctx.tag def render_runtime(self, ctx, data): req = inevow.IRequest(ctx) runtime = time.time() - req.processing_started_timestamp return ctx.tag["runtime: %s seconds" % runtime]
class StorageStatus(rend.Page): docFactory = getxmlfile("storage_status.xhtml") # the default 'data' argument is the StorageServer instance def __init__(self, storage): rend.Page.__init__(self, storage) self.storage = storage def renderHTTP(self, ctx): req = inevow.IRequest(ctx) t = get_arg(req, "t") if t == "json": return self.render_JSON(req) return rend.Page.renderHTTP(self, ctx) def render_JSON(self, req): req.setHeader("content-type", "text/plain") d = { "stats": self.storage.get_stats(), "bucket-counter": self.storage.bucket_counter.get_state(), "lease-checker": self.storage.lease_checker.get_state(), "lease-checker-progress": self.storage.lease_checker.get_progress(), } return simplejson.dumps(d, indent=1) + "\n" def render_storage_running(self, ctx, storage): if storage: return ctx.tag else: return T.h1["No Storage Server Running"] def render_bool(self, ctx, data): return {True: "Yes", False: "No"}[bool(data)] def render_abbrev_space(self, ctx, size): if size is None: return "?" return abbreviate_space(size) def render_space(self, ctx, size): if size is None: return "?" return "%d" % size def data_stats(self, ctx, data): # FYI: 'data' appears to be self, rather than the StorageServer # object in self.original that gets passed to render_* methods. I # still don't understand Nevow. # Nevow has nevow.accessors.DictionaryContainer: Any data= directive # that appears in a context in which the current data is a dictionary # will be looked up as keys in that dictionary. So if data_stats() # returns a dictionary, then we can use something like this: # # <ul n:data="stats"> # <li>disk_total: <span n:render="abbrev" n:data="disk_total" /></li> # </ul> # to use get_stats()["storage_server.disk_total"] . However, # DictionaryContainer does a raw d[] instead of d.get(), so any # missing keys will cause an error, even if the renderer can tolerate # None values. To overcome this, we either need a dict-like object # that always returns None for unknown keys, or we must pre-populate # our dict with those missing keys, or we should get rid of data_ # methods that return dicts (or find some way to override Nevow's # handling of dictionaries). 
        d = dict([(remove_prefix(k, "storage_server."), v)
                  for k, v in self.storage.get_stats().items()])
        d.setdefault("disk_total", None)
        d.setdefault("disk_used", None)
        d.setdefault("disk_free_for_root", None)
        d.setdefault("disk_free_for_nonroot", None)
        d.setdefault("reserved_space", None)
        d.setdefault("disk_avail", None)
        return d

    def data_last_complete_bucket_count(self, ctx, data):
        s = self.storage.bucket_counter.get_state()
        count = s.get("last-complete-bucket-count")
        if count is None:
            return "Not computed yet"
        return count

    def render_count_crawler_status(self, ctx, storage):
        p = self.storage.bucket_counter.get_progress()
        return ctx.tag[self.format_crawler_progress(p)]

    def format_crawler_progress(self, p):
        cycletime = p["estimated-time-per-cycle"]
        cycletime_s = ""
        if cycletime is not None:
            cycletime_s = " (estimated cycle time %s)" % abbreviate_time(
                cycletime)

        if p["cycle-in-progress"]:
            pct = p["cycle-complete-percentage"]
            soon = p["remaining-sleep-time"]
            eta = p["estimated-cycle-complete-time-left"]
            eta_s = ""
            if eta is not None:
                eta_s = " (ETA %ds)" % eta
            return [
                "Current crawl %.1f%% complete" % pct,
                eta_s,
                " (next work in %s)" % abbreviate_time(soon),
                cycletime_s,
            ]
        else:
            soon = p["remaining-wait-time"]
            return ["Next crawl in %s" % abbreviate_time(soon), cycletime_s]

    def render_lease_expiration_enabled(self, ctx, data):
        lc = self.storage.lease_checker
        if lc.expiration_enabled:
            return ctx.tag["Enabled: expired leases will be removed"]
        else:
            return ctx.tag[
                "Disabled: scan-only mode, no leases will be removed"]

    def render_lease_expiration_mode(self, ctx, data):
        lc = self.storage.lease_checker
        if lc.mode == "age":
            if lc.override_lease_duration is None:
                ctx.tag["Leases will expire naturally, probably 31 days after "
                        "creation or renewal."]
            else:
                ctx.tag["Leases created or last renewed more than %s ago "
                        "will be considered expired."
                        % abbreviate_time(lc.override_lease_duration)]
        else:
            assert lc.mode == "cutoff-date"
            localizedutcdate = time.strftime("%d-%b-%Y",
                                             time.gmtime(lc.cutoff_date))
            isoutcdate = time_format.iso_utc_date(lc.cutoff_date)
            ctx.tag["Leases created or last renewed before %s (%s) UTC "
                    "will be considered expired." % (
                        isoutcdate,
                        localizedutcdate,
                    )]
        if sorted(lc.sharetypes_to_expire) != ["immutable", "mutable"]:
            ctx.tag[" The following sharetypes will be expired: ",
                    " ".join(sorted(lc.sharetypes_to_expire)), "."]
        return ctx.tag

    def format_recovered(self, sr, a):
        def maybe(d):
            if d is None:
                return "?"
            return "%d" % d
        return "%s shares, %s buckets (%s mutable / %s immutable), %s (%s / %s)" % \
               (maybe(sr["%s-shares" % a]),
                maybe(sr["%s-buckets" % a]),
                maybe(sr["%s-buckets-mutable" % a]),
                maybe(sr["%s-buckets-immutable" % a]),
                abbreviate_space(sr["%s-diskbytes" % a]),
                abbreviate_space(sr["%s-diskbytes-mutable" % a]),
                abbreviate_space(sr["%s-diskbytes-immutable" % a]),
                )

    def render_lease_current_cycle_progress(self, ctx, data):
        lc = self.storage.lease_checker
        p = lc.get_progress()
        return ctx.tag[self.format_crawler_progress(p)]

    def render_lease_current_cycle_results(self, ctx, data):
        lc = self.storage.lease_checker
        p = lc.get_progress()
        if not p["cycle-in-progress"]:
            return ""
        s = lc.get_state()
        so_far = s["cycle-to-date"]
        sr = so_far["space-recovered"]
        er = s["estimated-remaining-cycle"]
        esr = er["space-recovered"]
        ec = s["estimated-current-cycle"]
        ecr = ec["space-recovered"]

        p = T.ul()
        def add(*pieces):
            p[T.li[pieces]]

        def maybe(d):
            if d is None:
                return "?"
return "%d" % d add( "So far, this cycle has examined %d shares in %d buckets" % (sr["examined-shares"], sr["examined-buckets"]), " (%d mutable / %d immutable)" % (sr["examined-buckets-mutable"], sr["examined-buckets-immutable"]), " (%s / %s)" % (abbreviate_space(sr["examined-diskbytes-mutable"]), abbreviate_space(sr["examined-diskbytes-immutable"])), ) add("and has recovered: ", self.format_recovered(sr, "actual")) if so_far["expiration-enabled"]: add("The remainder of this cycle is expected to recover: ", self.format_recovered(esr, "actual")) add("The whole cycle is expected to examine %s shares in %s buckets" % (maybe(ecr["examined-shares"]), maybe( ecr["examined-buckets"]))) add("and to recover: ", self.format_recovered(ecr, "actual")) else: add("If expiration were enabled, we would have recovered: ", self.format_recovered(sr, "configured"), " by now") add("and the remainder of this cycle would probably recover: ", self.format_recovered(esr, "configured")) add("and the whole cycle would probably recover: ", self.format_recovered(ecr, "configured")) add( "if we were strictly using each lease's default 31-day lease lifetime " "(instead of our configured behavior), " "this cycle would be expected to recover: ", self.format_recovered(ecr, "original")) if so_far["corrupt-shares"]: add( "Corrupt shares:", T.ul[[ T.li[[ "SI %s shnum %d" % corrupt_share for corrupt_share in so_far["corrupt-shares"] ]] ]]) return ctx.tag["Current cycle:", p] def render_lease_last_cycle_results(self, ctx, data): lc = self.storage.lease_checker h = lc.get_state()["history"] if not h: return "" last = h[max(h.keys())] start, end = last["cycle-start-finish-times"] ctx.tag["Last complete cycle (which took %s and finished %s ago)" " recovered: " % (abbreviate_time(end - start), abbreviate_time(time.time() - end)), self.format_recovered(last["space-recovered"], "actual")] p = T.ul() def add(*pieces): p[T.li[pieces]] saw = self.format_recovered(last["space-recovered"], "examined") add("and saw a total of ", saw) if not last["expiration-enabled"]: rec = self.format_recovered(last["space-recovered"], "configured") add( "but expiration was not enabled. If it had been, " "it would have recovered: ", rec) if last["corrupt-shares"]: add( "Corrupt shares:", T.ul[[ T.li[[ "SI %s shnum %d" % corrupt_share for corrupt_share in last["corrupt-shares"] ]] ]]) return ctx.tag[p]
class Statistics(rend.Page): docFactory = getxmlfile("statistics.xhtml") def __init__(self, provider): rend.Page.__init__(self, provider) self.provider = provider def renderHTTP(self, ctx): req = inevow.IRequest(ctx) t = get_arg(req, "t") if t == "json": stats = self.provider.get_stats() req.setHeader("content-type", "text/plain") return simplejson.dumps(stats, indent=1) + "\n" return rend.Page.renderHTTP(self, ctx) def data_get_stats(self, ctx, data): return self.provider.get_stats() def render_load_average(self, ctx, data): return str(data["stats"].get("load_monitor.avg_load")) def render_peak_load(self, ctx, data): return str(data["stats"].get("load_monitor.max_load")) def render_uploads(self, ctx, data): files = data["counters"].get("uploader.files_uploaded", 0) bytes = data["counters"].get("uploader.bytes_uploaded", 0) return ("%s files / %s bytes (%s)" % (files, bytes, abbreviate_size(bytes))) def render_downloads(self, ctx, data): files = data["counters"].get("downloader.files_downloaded", 0) bytes = data["counters"].get("downloader.bytes_downloaded", 0) return ("%s files / %s bytes (%s)" % (files, bytes, abbreviate_size(bytes))) def render_publishes(self, ctx, data): files = data["counters"].get("mutable.files_published", 0) bytes = data["counters"].get("mutable.bytes_published", 0) return "%s files / %s bytes (%s)" % (files, bytes, abbreviate_size(bytes)) def render_retrieves(self, ctx, data): files = data["counters"].get("mutable.files_retrieved", 0) bytes = data["counters"].get("mutable.bytes_retrieved", 0) return "%s files / %s bytes (%s)" % (files, bytes, abbreviate_size(bytes)) def render_drop_monitored(self, ctx, data): dirs = data["counters"].get("drop_upload.dirs_monitored", 0) return "%s directories" % (dirs, ) def render_drop_uploads(self, ctx, data): # TODO: bytes uploaded files = data["counters"].get("drop_upload.files_uploaded", 0) return "%s files" % (files, ) def render_drop_queued(self, ctx, data): files = data["counters"].get("drop_upload.files_queued", 0) return "%s files" % (files, ) def render_drop_failed(self, ctx, data): files = data["counters"].get("drop_upload.files_failed", 0) return "%s files" % (files, ) def render_raw(self, ctx, data): raw = pprint.pformat(data) return ctx.tag[raw]
class IntroducerRoot(rend.Page): addSlash = True docFactory = getxmlfile("introducer.xhtml") child_operations = None def __init__(self, introducer_node): self.introducer_node = introducer_node self.introducer_service = introducer_node.getServiceNamed("introducer") rend.Page.__init__(self, introducer_node) static_dir = resource_filename("allmydata.web", "static") for filen in os.listdir(static_dir): self.putChild(filen, nevow_File(os.path.join(static_dir, filen))) def renderHTTP(self, ctx): t = get_arg(inevow.IRequest(ctx), "t") if t == "json": return self.render_JSON(ctx) return rend.Page.renderHTTP(self, ctx) def render_JSON(self, ctx): res = {} counts = {} for s in self.introducer_service.get_subscribers(): if s.service_name not in counts: counts[s.service_name] = 0 counts[s.service_name] += 1 res["subscription_summary"] = counts announcement_summary = {} service_hosts = {} for ad in self.introducer_service.get_announcements(): service_name = ad.service_name if service_name not in announcement_summary: announcement_summary[service_name] = 0 announcement_summary[service_name] += 1 if service_name not in service_hosts: service_hosts[service_name] = set() # it's nice to know how many distinct hosts are available for # each service. We define a "host" by a set of addresses # (hostnames or ipv4 addresses), which we extract from the # connection hints. In practice, this is usually close # enough: when multiple services are run on a single host, # they're usually either configured with the same addresses, # or setLocationAutomatically picks up the same interfaces. host = frozenset(ad.advertised_addresses) service_hosts[service_name].add(host) res["announcement_summary"] = announcement_summary distinct_hosts = dict([(name, len(hosts)) for (name, hosts) in service_hosts.iteritems()]) res["announcement_distinct_hosts"] = distinct_hosts return simplejson.dumps(res, indent=1) + "\n" # FIXME: This code is duplicated in root.py and introweb.py. 
def data_version(self, ctx, data): return get_package_versions_string() def data_import_path(self, ctx, data): return str(allmydata).replace("/", "/ ") # XXX kludge for wrapping def data_my_nodeid(self, ctx, data): return idlib.nodeid_b2a(self.introducer_node.nodeid) def render_announcement_summary(self, ctx, data): services = {} for ad in self.introducer_service.get_announcements(): if ad.service_name not in services: services[ad.service_name] = 0 services[ad.service_name] += 1 service_names = services.keys() service_names.sort() return ", ".join([ "%s: %d" % (service_name, services[service_name]) for service_name in service_names ]) def render_client_summary(self, ctx, data): counts = {} for s in self.introducer_service.get_subscribers(): if s.service_name not in counts: counts[s.service_name] = 0 counts[s.service_name] += 1 return ", ".join([ "%s: %d" % (name, counts[name]) for name in sorted(counts.keys()) ]) def data_services(self, ctx, data): services = self.introducer_service.get_announcements(False) services.sort(key=lambda ad: (ad.service_name, ad.nickname)) return services def render_service_row(self, ctx, ad): ctx.fillSlots("serverid", ad.serverid) ctx.fillSlots("nickname", ad.nickname) ctx.fillSlots("advertised", " ".join(ad.advertised_addresses)) ctx.fillSlots("connected", "?") when_s = time.strftime("%H:%M:%S %d-%b-%Y", time.localtime(ad.when)) ctx.fillSlots("announced", when_s) ctx.fillSlots("version", ad.version) ctx.fillSlots("service_name", ad.service_name) return ctx.tag def data_subscribers(self, ctx, data): return self.introducer_service.get_subscribers() def render_subscriber_row(self, ctx, s): ctx.fillSlots("nickname", s.nickname) ctx.fillSlots("peerid", s.tubid) ctx.fillSlots("advertised", " ".join(s.advertised_addresses)) ctx.fillSlots("connected", s.remote_address) since_s = time.strftime("%H:%M:%S %d-%b-%Y", time.localtime(s.when)) ctx.fillSlots("since", since_s) ctx.fillSlots("version", s.version) ctx.fillSlots("service_name", s.service_name) return ctx.tag
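# For reference, render_JSON() above emits a document shaped like the
# following (the service name and counts are illustrative, not real
# output):
#
#  {
#   "subscription_summary": {"storage": 17},
#   "announcement_summary": {"storage": 23},
#   "announcement_distinct_hosts": {"storage": 20}
#  }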
class ProvisioningTool(rend.Page): addSlash = True docFactory = getxmlfile("provisioning.xhtml") def render_forms(self, ctx, data): req = inevow.IRequest(ctx) def getarg(name, astype=int): if req.method != "POST": return None if name in req.fields: return astype(req.fields[name].value) return None return self.do_forms(getarg) def do_forms(self, getarg): filled = getarg("filled", bool) def get_and_set(name, options, default=None, astype=int): current_value = getarg(name, astype) i_select = T.select(name=name) for (count, description) in options: count = astype(count) if ((current_value is not None and count == current_value) or (current_value is None and count == default)): o = T.option(value=str(count), selected="true")[description] else: o = T.option(value=str(count))[description] i_select = i_select[o] if current_value is None: current_value = default return current_value, i_select sections = {} def add_input(section, text, entry): if section not in sections: sections[section] = [] sections[section].extend([T.div[text, ": ", entry], "\n"]) def add_output(section, entry): if section not in sections: sections[section] = [] sections[section].extend([entry, "\n"]) def build_section(section): return T.fieldset[T.legend[section], sections[section]] def number(value, suffix=""): scaling = 1 if value < 1: fmt = "%1.2g%s" elif value < 100: fmt = "%.1f%s" elif value < 1000: fmt = "%d%s" elif value < 1e6: fmt = "%.2fk%s" scaling = 1e3 elif value < 1e9: fmt = "%.2fM%s" scaling = 1e6 elif value < 1e12: fmt = "%.2fG%s" scaling = 1e9 elif value < 1e15: fmt = "%.2fT%s" scaling = 1e12 elif value < 1e18: fmt = "%.2fP%s" scaling = 1e15 else: fmt = "huge! %g%s" return fmt % (value / scaling, suffix) user_counts = [ (5, "5 users"), (50, "50 users"), (200, "200 users"), (1000, "1k users"), (10000, "10k users"), (50000, "50k users"), (100000, "100k users"), (500000, "500k users"), (1000000, "1M users"), ] num_users, i_num_users = get_and_set("num_users", user_counts, 50000) add_input("Users", "How many users are on this network?", i_num_users) files_per_user_counts = [ (100, "100 files"), (1000, "1k files"), (10000, "10k files"), (100000, "100k files"), (1e6, "1M files"), ] files_per_user, i_files_per_user = get_and_set("files_per_user", files_per_user_counts, 1000) add_input("Users", "How many files for each user? (avg)", i_files_per_user) space_per_user_sizes = [ (1e6, "1MB"), (10e6, "10MB"), (100e6, "100MB"), (200e6, "200MB"), (1e9, "1GB"), (2e9, "2GB"), (5e9, "5GB"), (10e9, "10GB"), (100e9, "100GB"), (1e12, "1TB"), ] # current allmydata average utilization 127MB per user space_per_user, i_space_per_user = get_and_set("space_per_user", space_per_user_sizes, 200e6) add_input("Users", "How much data for each user? (avg)", i_space_per_user) sharing_ratios = [ (1.0, "1.0x"), (1.1, "1.1x"), (2.0, "2.0x"), ] sharing_ratio, i_sharing_ratio = get_and_set("sharing_ratio", sharing_ratios, 1.0, float) add_input( "Users", "What is the sharing ratio? 
(1.0x is no-sharing and"
                  " no convergence)", i_sharing_ratio)

        # Encoding parameters
        encoding_choices = [
            ("3-of-10-5", "3.3x (3-of-10, repair below 5)"),
            ("3-of-10-8", "3.3x (3-of-10, repair below 8)"),
            ("5-of-10-7", "2x (5-of-10, repair below 7)"),
            ("8-of-10-9", "1.25x (8-of-10, repair below 9)"),
            ("27-of-30-28", "1.1x (27-of-30, repair below 28)"),
            ("25-of-100-50", "4x (25-of-100, repair below 50)"),
        ]
        encoding_parameters, i_encoding_parameters = \
            get_and_set("encoding_parameters", encoding_choices, "3-of-10-5",
                        str)
        encoding_pieces = encoding_parameters.split("-")
        k = int(encoding_pieces[0])
        assert encoding_pieces[1] == "of"
        n = int(encoding_pieces[2])
        # we repair the file when the number of available shares drops below
        # this value
        repair_threshold = int(encoding_pieces[3])

        add_input("Servers", "What are the default encoding parameters?",
                  i_encoding_parameters)

        # Server info
        num_server_choices = [
            (5, "5 servers"),
            (10, "10 servers"),
            (15, "15 servers"),
            (30, "30 servers"),
            (50, "50 servers"),
            (100, "100 servers"),
            (200, "200 servers"),
            (300, "300 servers"),
            (500, "500 servers"),
            (1000, "1k servers"),
            (2000, "2k servers"),
            (5000, "5k servers"),
            (10e3, "10k servers"),
            (100e3, "100k servers"),
            (1e6, "1M servers"),
        ]
        num_servers, i_num_servers = \
            get_and_set("num_servers", num_server_choices, 30, int)
        add_input("Servers", "How many servers are there?", i_num_servers)

        # availability is measured in dBA = -dBF, where 0dBF is 100% failure,
        # 10dBF is 10% failure, 20dBF is 1% failure, etc
        server_dBA_choices = [
            (10, "90% [10dBA] (2.4hr/day)"),
            (13, "95% [13dBA] (1.2hr/day)"),
            (20, "99% [20dBA] (14min/day or 3.5days/year)"),
            (23, "99.5% [23dBA] (7min/day or 1.75days/year)"),
            (30, "99.9% [30dBA] (87sec/day or 9hours/year)"),
            (40, "99.99% [40dBA] (60sec/week or 53min/year)"),
            (50, "99.999% [50dBA] (5min per year)"),
        ]
        server_dBA, i_server_availability = \
            get_and_set("server_availability", server_dBA_choices, 20, int)
        add_input("Servers", "What is the server availability?",
                  i_server_availability)

        drive_MTBF_choices = [
            (40, "40,000 Hours"),
        ]
        drive_MTBF, i_drive_MTBF = \
            get_and_set("drive_MTBF", drive_MTBF_choices, 40, int)
        add_input("Drives", "What is the hard drive MTBF?", i_drive_MTBF)
        # http://www.tgdaily.com/content/view/30990/113/
        # http://labs.google.com/papers/disk_failures.pdf
        # google sees:
        #  1.7% of the drives they replaced were 0-1 years old
        #  8% of the drives they replaced were 1-2 years old
        #  8.6% were 2-3 years old
        #  6% were 3-4 years old, about 8% were 4-5 years old

        drive_size_choices = [
            (100, "100 GB"),
            (250, "250 GB"),
            (500, "500 GB"),
            (750, "750 GB"),
        ]
        drive_size, i_drive_size = \
            get_and_set("drive_size", drive_size_choices, 750, int)
        drive_size = drive_size * 1e9
        add_input("Drives", "What is the capacity of each hard drive?",
                  i_drive_size)
        drive_failure_model_choices = [
            ("E", "Exponential"),
            ("U", "Uniform"),
        ]
        drive_failure_model, i_drive_failure_model = \
            get_and_set("drive_failure_model", drive_failure_model_choices,
                        "E", str)
        add_input("Drives", "How should we model drive failures?",
                  i_drive_failure_model)
        # drive_failure_rate is in failures per second
        if drive_failure_model == "E":
            drive_failure_rate = 1.0 / (drive_MTBF * 1000 * 3600)
        else:
            drive_failure_rate = 0.5 / (drive_MTBF * 1000 * 3600)

        # deletion/gc/ownership mode
        ownership_choices = [
            ("A", "no deletion, no gc, no owners"),
            ("B", "deletion, no gc, no owners"),
            ("C", "deletion, share timers, no owners"),
            ("D", "deletion, no gc, yes owners"),
            ("E", "deletion, owner timers"),
        ]
        ownership_mode, i_ownership_mode
= \ get_and_set("ownership_mode", ownership_choices, "A", str) add_input("Servers", "What is the ownership mode?", i_ownership_mode) # client access behavior access_rates = [ (1, "one file per day"), (10, "10 files per day"), (100, "100 files per day"), (1000, "1k files per day"), (10e3, "10k files per day"), (100e3, "100k files per day"), ] download_files_per_day, i_download_rate = \ get_and_set("download_rate", access_rates, 100, int) add_input("Users", "How many files are downloaded per day?", i_download_rate) download_rate = 1.0 * download_files_per_day / (24 * 60 * 60) upload_files_per_day, i_upload_rate = \ get_and_set("upload_rate", access_rates, 10, int) add_input("Users", "How many files are uploaded per day?", i_upload_rate) upload_rate = 1.0 * upload_files_per_day / (24 * 60 * 60) delete_files_per_day, i_delete_rate = \ get_and_set("delete_rate", access_rates, 10, int) add_input("Users", "How many files are deleted per day?", i_delete_rate) delete_rate = 1.0 * delete_files_per_day / (24 * 60 * 60) # the value is in days lease_timers = [ (1, "one refresh per day"), (7, "one refresh per week"), ] lease_timer, i_lease = \ get_and_set("lease_timer", lease_timers, 7, int) add_input( "Users", "How frequently do clients refresh files or accounts? " "(if necessary)", i_lease) seconds_per_lease = 24 * 60 * 60 * lease_timer check_timer_choices = [ (1, "every week"), (4, "every month"), (8, "every two months"), (16, "every four months"), ] check_timer, i_check_timer = \ get_and_set("check_timer", check_timer_choices, 4, int) add_input("Users", "How frequently should we check on each file?", i_check_timer) file_check_interval = check_timer * 7 * 24 * 3600 if filled: add_output("Users", T.div["Total users: %s" % number(num_users)]) add_output("Users", T.div["Files per user: %s" % number(files_per_user)]) file_size = 1.0 * space_per_user / files_per_user add_output("Users", T.div["Average file size: ", number(file_size)]) total_files = num_users * files_per_user / sharing_ratio add_output( "Grid", T.div["Total number of files in grid: ", number(total_files)]) total_space = num_users * space_per_user / sharing_ratio add_output( "Grid", T.div["Total volume of plaintext in grid: ", number(total_space, "B")]) total_shares = n * total_files add_output("Grid", T.div["Total shares in grid: ", number(total_shares)]) expansion = float(n) / float(k) total_usage = expansion * total_space add_output("Grid", T.div["Share data in grid: ", number(total_usage, "B")]) if n > num_servers: # silly configuration, causes Tahoe2 to wrap and put multiple # shares on some servers. add_output( "Servers", T.div["non-ideal: more shares than servers" " (n=%d, servers=%d)" % (n, num_servers)]) # every file has at least one share on every server buckets_per_server = total_files shares_per_server = total_files * ((1.0 * n) / num_servers) else: # if nobody is full, then no lease requests will be turned # down for lack of space, and no two shares for the same file # will share a server. Therefore the chance that any given # file has a share on any given server is n/num_servers. buckets_per_server = total_files * ((1.0 * n) / num_servers) # since each such represented file only puts one share on a # server, the total number of shares per server is the same. shares_per_server = buckets_per_server add_output( "Servers", T.div["Buckets per server: ", number(buckets_per_server)]) add_output("Servers", T.div["Shares per server: ", number(shares_per_server)]) # how much space is used on the storage servers for the shares? 
# the share data itself share_data_per_server = total_usage / num_servers add_output( "Servers", T.div["Share data per server: ", number(share_data_per_server, "B")]) # this is determined empirically. H=hashsize=32, for a one-segment # file and 3-of-10 encoding share_validation_per_server = 266 * shares_per_server # this could be 423*buckets_per_server, if we moved the URI # extension into a separate file, but that would actually consume # *more* space (minimum filesize is 4KiB), unless we moved all # shares for a given bucket into a single file. share_uri_extension_per_server = 423 * shares_per_server # ownership mode adds per-bucket data H = 32 # depends upon the desired security of delete/refresh caps # bucket_lease_size is the amount of data needed to keep track of # the delete/refresh caps for each bucket. bucket_lease_size = 0 client_bucket_refresh_rate = 0 owner_table_size = 0 if ownership_mode in ("B", "C", "D", "E"): bucket_lease_size = sharing_ratio * 1.0 * H if ownership_mode in ("B", "C"): # refreshes per second per client client_bucket_refresh_rate = (1.0 * n * files_per_user / seconds_per_lease) add_output( "Users", T.div["Client share refresh rate (outbound): ", number(client_bucket_refresh_rate, "Hz")]) server_bucket_refresh_rate = (client_bucket_refresh_rate * num_users / num_servers) add_output( "Servers", T.div["Server share refresh rate (inbound): ", number(server_bucket_refresh_rate, "Hz")]) if ownership_mode in ("D", "E"): # each server must maintain a bidirectional mapping from # buckets to owners. One way to implement this would be to # put a list of four-byte owner numbers into each bucket, and # a list of four-byte share numbers into each owner (although # of course we'd really just throw it into a database and let # the experts take care of the details). 
owner_table_size = 2 * (buckets_per_server * sharing_ratio * 4) if ownership_mode in ("E", ): # in this mode, clients must refresh one timer per server client_account_refresh_rate = (1.0 * num_servers / seconds_per_lease) add_output( "Users", T.div["Client account refresh rate (outbound): ", number(client_account_refresh_rate, "Hz")]) server_account_refresh_rate = (client_account_refresh_rate * num_users / num_servers) add_output( "Servers", T.div["Server account refresh rate (inbound): ", number(server_account_refresh_rate, "Hz")]) # TODO: buckets vs shares here is a bit wonky, but in # non-wrapping grids it shouldn't matter share_lease_per_server = bucket_lease_size * buckets_per_server share_ownertable_per_server = owner_table_size share_space_per_server = (share_data_per_server + share_validation_per_server + share_uri_extension_per_server + share_lease_per_server + share_ownertable_per_server) add_output( "Servers", T.div["Share space per server: ", number(share_space_per_server, "B"), " (data ", number(share_data_per_server, "B"), ", validation ", number(share_validation_per_server, "B"), ", UEB ", number(share_uri_extension_per_server, "B"), ", lease ", number(share_lease_per_server, "B"), ", ownertable ", number(share_ownertable_per_server, "B"), ")", ]) # rates client_download_share_rate = download_rate * k client_download_byte_rate = download_rate * file_size add_output( "Users", T.div["download rate: shares = ", number(client_download_share_rate, "Hz"), " , bytes = ", number(client_download_byte_rate, "Bps"), ]) total_file_check_rate = 1.0 * total_files / file_check_interval client_check_share_rate = total_file_check_rate / num_users add_output( "Users", T.div["file check rate: shares = ", number(client_check_share_rate, "Hz"), " (interval = %s)" % number(1 / client_check_share_rate, "s"), ]) client_upload_share_rate = upload_rate * n # TODO: doesn't include overhead client_upload_byte_rate = upload_rate * file_size * expansion add_output( "Users", T.div["upload rate: shares = ", number(client_upload_share_rate, "Hz"), " , bytes = ", number(client_upload_byte_rate, "Bps"), ]) client_delete_share_rate = delete_rate * n server_inbound_share_rate = (client_upload_share_rate * num_users / num_servers) server_inbound_byte_rate = (client_upload_byte_rate * num_users / num_servers) add_output( "Servers", T.div["upload rate (inbound): shares = ", number(server_inbound_share_rate, "Hz"), " , bytes = ", number(server_inbound_byte_rate, "Bps"), ]) add_output( "Servers", T.div["share check rate (inbound): ", number(total_file_check_rate * n / num_servers, "Hz"), ]) server_share_modify_rate = ( (client_upload_share_rate + client_delete_share_rate) * num_users / num_servers) add_output( "Servers", T.div["share modify rate: shares = ", number(server_share_modify_rate, "Hz"), ]) server_outbound_share_rate = (client_download_share_rate * num_users / num_servers) server_outbound_byte_rate = (client_download_byte_rate * num_users / num_servers) add_output( "Servers", T.div["download rate (outbound): shares = ", number(server_outbound_share_rate, "Hz"), " , bytes = ", number(server_outbound_byte_rate, "Bps"), ]) total_share_space = num_servers * share_space_per_server add_output( "Grid", T.div["Share space consumed: ", number(total_share_space, "B")]) add_output( "Grid", T.div[" %% validation: %.2f%%" % (100.0 * share_validation_per_server / share_space_per_server)]) add_output( "Grid", T.div[" %% uri-extension: %.2f%%" % (100.0 * share_uri_extension_per_server / share_space_per_server)]) 
            add_output("Grid",
                       T.div[" %% lease data: %.2f%%" %
                             (100.0 * share_lease_per_server /
                              share_space_per_server)])
            add_output("Grid",
                       T.div[" %% owner data: %.2f%%" %
                             (100.0 * share_ownertable_per_server /
                              share_space_per_server)])
            add_output("Grid",
                       T.div[" %% share data: %.2f%%" %
                             (100.0 * share_data_per_server /
                              share_space_per_server)])
            add_output("Grid",
                       T.div["file check rate: ",
                             number(total_file_check_rate, "Hz")])

            total_drives = max(
                mathutil.div_ceil(int(total_share_space), int(drive_size)),
                num_servers)
            add_output("Drives",
                       T.div["Total drives: ", number(total_drives),
                             " drives"])
            drives_per_server = mathutil.div_ceil(total_drives, num_servers)
            add_output("Servers",
                       T.div["Drives per server: ", drives_per_server])

            # costs
            if drive_size == 750 * 1e9:
                add_output("Servers", T.div["750GB drive: $250 each"])
                drive_cost = 250
            else:
                add_output("Servers",
                           T.div[T.b["unknown cost per drive, assuming $100"]])
                drive_cost = 100

            if drives_per_server <= 4:
                add_output("Servers", T.div["1U box with <= 4 drives: $1500"])
                server_cost = 1500  # typical 1U box
            elif drives_per_server <= 12:
                add_output("Servers", T.div["2U box with <= 12 drives: $2500"])
                server_cost = 2500  # 2U box
            else:
                add_output("Servers",
                           T.div[T.b["Note: too many drives per server, "
                                     "assuming $3000"]])
                server_cost = 3000

            server_capital_cost = (server_cost +
                                   drives_per_server * drive_cost)
            total_server_cost = float(num_servers * server_capital_cost)
            add_output("Servers",
                       T.div["Capital cost per server: $",
                             server_capital_cost])
            add_output("Grid",
                       T.div["Capital cost for all servers: $",
                             number(total_server_cost)])
            # $70/Mbps/mo
            # $44/server/mo power+space
            server_bandwidth = max(server_inbound_byte_rate,
                                   server_outbound_byte_rate)
            server_bandwidth_mbps = mathutil.div_ceil(
                int(server_bandwidth * 8), int(1e6))
            server_monthly_cost = 70 * server_bandwidth_mbps + 44
            add_output("Servers",
                       T.div["Monthly cost per server: $",
                             server_monthly_cost])
            add_output("Users",
                       T.div["Capital cost per user: $",
                             number(total_server_cost / num_users)])

            # reliability
            any_drive_failure_rate = total_drives * drive_failure_rate
            any_drive_MTBF = 1 / any_drive_failure_rate  # in seconds
            any_drive_MTBF_days = any_drive_MTBF / 86400
            add_output("Drives",
                       T.div["MTBF (any drive): ",
                             number(any_drive_MTBF_days), " days"])
            drive_replacement_monthly_cost = (float(drive_cost) *
                                              any_drive_failure_rate *
                                              30 * 86400)
            add_output("Grid",
                       T.div["Monthly cost of replacing drives: $",
                             number(drive_replacement_monthly_cost)])

            total_server_monthly_cost = float(num_servers *
                                              server_monthly_cost +
                                              drive_replacement_monthly_cost)
            add_output("Grid",
                       T.div["Monthly cost for all servers: $",
                             number(total_server_monthly_cost)])
            add_output("Users",
                       T.div["Monthly cost per user: $",
                             number(total_server_monthly_cost / num_users)])

            # availability
            file_dBA = self.file_availability(k, n, server_dBA)
            user_files_dBA = self.many_files_availability(file_dBA,
                                                          files_per_user)
            all_files_dBA = self.many_files_availability(file_dBA,
                                                         total_files)
            add_output("Users",
                       T.div["availability of: ",
                             "arbitrary file = %d dBA, " % file_dBA,
                             "all files of user1 = %d dBA, " % user_files_dBA,
                             "all files in grid = %d dBA" % all_files_dBA,
                             ],
                       )

            time_until_files_lost = (n - k + 1) / any_drive_failure_rate
            add_output("Grid",
                       T.div["avg time until files are lost: ",
                             number(time_until_files_lost, "s"), ", ",
                             number(time_until_files_lost / 86400, " days"),
                             ])

            share_data_loss_rate = any_drive_failure_rate * drive_size
            add_output("Grid",
                       T.div["share data loss rate: ",
                             number(share_data_loss_rate, "Bps")])

            # the worst-case survival numbers occur when we do a file check
            # and the file is just above the threshold for repair (so we
            # decide to not repair it). The question is then: what is the
            # chance that the file will decay so badly before the next check
            # that we can't recover it? The resulting probability is per
            # check interval.
            # Note that the chances of us getting into this situation are low.
            P_disk_failure_during_interval = (drive_failure_rate *
                                              file_check_interval)
            disk_failure_dBF = 10 * math.log10(P_disk_failure_during_interval)
            disk_failure_dBA = -disk_failure_dBF
            file_survives_dBA = self.file_availability(k, repair_threshold,
                                                       disk_failure_dBA)
            user_files_survives_dBA = self.many_files_availability(
                file_survives_dBA, files_per_user)
            all_files_survives_dBA = self.many_files_availability(
                file_survives_dBA, total_files)
            add_output("Users",
                       T.div["survival of: ",
                             "arbitrary file = %d dBA, " % file_survives_dBA,
                             "all files of user1 = %d dBA, " %
                             user_files_survives_dBA,
                             "all files in grid = %d dBA" %
                             all_files_survives_dBA,
                             " (per worst-case check interval)",
                             ])

        all_sections = []
        all_sections.append(build_section("Users"))
        all_sections.append(build_section("Servers"))
        all_sections.append(build_section("Drives"))
        if "Grid" in sections:
            all_sections.append(build_section("Grid"))

        f = T.form(action=".", method="post", enctype="multipart/form-data")

        if filled:
            action = "Recompute"
        else:
            action = "Compute"

        f = f[T.input(type="hidden", name="filled", value="true"),
              T.input(type="submit", value=action),
              all_sections,
              ]

        try:
            from allmydata import reliability
            # we import this just to test to see if the page is available
            _hush_pyflakes = reliability
            del _hush_pyflakes
            f = [T.div[T.a(href="../reliability")["Reliability Math"]], f]
        except ImportError:
            pass

        return f

    def file_availability(self, k, n, server_dBA):
        """
        The full formula for the availability of a specific file is::

         1 - sum([choose(N,i) * p**i * (1-p)**(N-i) for i in range(k)])

        Where choose(N,i) = N! / ( i! * (N-i)! ) . Note that each term of
        this summation is the probability that there are exactly 'i' servers
        available, and what we're doing is adding up the cases where i is
        too low.

        This is a nuisance to calculate at all accurately, especially once N
        gets large, and when p is close to unity. So we make an engineering
        approximation: if (1-p) is very small, then each [i] term is much
        larger than the [i-1] term, and the sum is dominated by the i=k-1
        term. This only works for (1-p) < 10%, and when the choose() function
        doesn't rise fast enough to compensate. For high-expansion encodings
        (3-of-10, 25-of-100), the choose() function is rising at the same
        time as the (1-p)**(N-i) term, so that's not an issue. For
        low-expansion encodings (7-of-10, 75-of-100) the two values are
        moving in opposite directions, so more care must be taken.

        Note that the p**i term has only a minor effect as long as (1-p)*N
        is small, and even then the effect is attenuated by the 1-p term.
        """
        assert server_dBA > 9  # >=90% availability to use the approximation
        factor = binomial(n, k - 1)
        factor_dBA = 10 * math.log10(factor)
        exponent = n - k + 1
        file_dBA = server_dBA * exponent - factor_dBA
        return file_dBA

    def many_files_availability(self, file_dBA, num_files):
        """The probability that 'num_files' independent bernoulli trials
        will succeed (i.e. we can recover all files in the grid at any given
        moment) is p**num_files.

        Since p is close to unity, we express p in dBA instead, so we can
        get useful precision on q (=1-p), and then the formula becomes::

         P_some_files_unavailable = 1 - (1 - q)**num_files

        That (1-q)**n expands with the usual binomial sequence, 1 - nq +
        Xq**2 ... + Xq**n . We use the same approximation as before, since
        we know q is close to zero, and we get to ignore all the terms past
        -nq.
        """
        many_files_dBA = file_dBA - 10 * math.log10(num_files)
        return many_files_dBA
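# Cross-check for the engineering approximation above: the exact per-file
# availability is the binomial tail spelled out in file_availability()'s
# docstring. A self-contained sketch (standalone helper, not used by the
# page); for 3-of-10 at 20 dBA per server it agrees with the closed-form
# approximation to within about 0.2 dBA.

def _example_exact_file_availability_dBA(k, n, server_dBA):
    import math
    def choose(N, i):
        return (math.factorial(N) //
                (math.factorial(i) * math.factorial(N - i)))
    p = 1.0 - 10 ** (-server_dBA / 10.0)  # per-server availability
    # P(file unavailable) = P(fewer than k of the n servers are up)
    p_fail = sum(choose(n, i) * p**i * (1.0 - p)**(n - i)
                 for i in range(k))
    return -10 * math.log10(p_fail)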
class MoreInfo(rend.Page): addSlash = False docFactory = getxmlfile("info.xhtml") def abbrev(self, storage_index_or_none): if storage_index_or_none: return base32.b2a(storage_index_or_none)[:6] return "LIT file" def get_type(self): node = self.original if IDirectoryNode.providedBy(node): if not node.is_mutable(): return "immutable directory" return "directory" if IFileNode.providedBy(node): si = node.get_storage_index() if si: if node.is_mutable(): return "mutable file" return "immutable file" return "immutable LIT file" return "unknown" def render_title(self, ctx, data): node = self.original si = node.get_storage_index() t = "More Info for %s" % self.get_type() if si: t += " (SI=%s)" % self.abbrev(si) return ctx.tag[t] def render_header(self, ctx, data): return self.render_title(ctx, data) def render_type(self, ctx, data): return ctx.tag[self.get_type()] def render_si(self, ctx, data): si = self.original.get_storage_index() if not si: return "None" return ctx.tag[base32.b2a(si)] def render_size(self, ctx, data): node = self.original d = node.get_current_size() def _no_size(size): if size is None: return "?" return size d.addCallback(_no_size) def _handle_unrecoverable(f): f.trap(UnrecoverableFileError) return "?" d.addErrback(_handle_unrecoverable) d.addCallback(lambda size: ctx.tag[size]) return d def render_directory_writecap(self, ctx, data): node = self.original if not IDirectoryNode.providedBy(node): return "" if node.is_readonly(): return "" return ctx.tag[node.get_uri()] def render_directory_readcap(self, ctx, data): node = self.original if not IDirectoryNode.providedBy(node): return "" return ctx.tag[node.get_readonly_uri()] def render_directory_verifycap(self, ctx, data): node = self.original if not IDirectoryNode.providedBy(node): return "" verifier = node.get_verify_cap() if verifier: return ctx.tag[node.get_verify_cap().to_string()] return "" def render_file_writecap(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): node = node._node write_uri = node.get_write_uri() if not write_uri: return "" return ctx.tag[write_uri] def render_file_readcap(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): node = node._node read_uri = node.get_readonly_uri() if not read_uri: return "" return ctx.tag[read_uri] def render_file_verifycap(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): node = node._node verifier = node.get_verify_cap() if verifier: return ctx.tag[node.get_verify_cap().to_string()] return "" def get_root(self, ctx): req = IRequest(ctx) # the addSlash=True gives us one extra (empty) segment depth = len(req.prepath) + len(req.postpath) - 1 link = "/".join([".."] * depth) return link def render_raw_link(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): node = node._node elif IFileNode.providedBy(node): pass else: return "" root = self.get_root(ctx) quoted_uri = urllib.quote(node.get_uri()) text_plain_url = "%s/file/%s/@@named=/raw.txt" % (root, quoted_uri) return T.li["Raw data as ", T.a(href=text_plain_url)["text/plain"]] def render_is_checkable(self, ctx, data): node = self.original si = node.get_storage_index() if si: return ctx.tag # don't show checker button for LIT files return "" def render_check_form(self, ctx, data): node = self.original quoted_uri = urllib.quote(node.get_uri()) target = self.get_root(ctx) + "/uri/" + quoted_uri if IDirectoryNode.providedBy(node): target += "/" check = T.form( action=target, method="post", enctype="multipart/form-data")[T.fieldset[ 
T.input(type="hidden", name="t", value="check"), T.input(type="hidden", name="return_to", value="."), T.legend(class_="freeform-form-label")["Check on this object"], T.div["Verify every bit? (EXPENSIVE):", T.input(type="checkbox", name="verify"), ], T.div["Repair any problems?: ", T.input(type="checkbox", name="repair")], T.div["Add/renew lease on all shares?: ", T.input(type="checkbox", name="add-lease")], T.div["Emit results in JSON format?: ", T.input(type="checkbox", name="output", value="JSON")], T.input(type="submit", value="Check"), ]] return ctx.tag[check] def render_is_mutable_file(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): return "" if (IFileNode.providedBy(node) and node.is_mutable() and not node.is_readonly()): return ctx.tag return "" def render_overwrite_form(self, ctx, data): node = self.original root = self.get_root(ctx) action = "%s/uri/%s" % (root, urllib.quote(node.get_uri())) done_url = "%s/uri/%s?t=info" % (root, urllib.quote(node.get_uri())) overwrite = T.form( action=action, method="post", enctype="multipart/form-data")[T.fieldset[ T.input(type="hidden", name="t", value="upload"), T.input(type='hidden', name='when_done', value=done_url), T.legend(class_="freeform-form-label")["Overwrite"], "Upload new contents: ", T.input(type="file", name="file"), " ", T.input(type="submit", value="Replace Contents")]] return ctx.tag[overwrite] def render_is_directory(self, ctx, data): node = self.original if IDirectoryNode.providedBy(node): return ctx.tag return "" def render_deep_check_form(self, ctx, data): ophandle = base32.b2a(os.urandom(16)) deep_check = T.form( action=".", method="post", enctype="multipart/form-data")[T.fieldset[ T.input(type="hidden", name="t", value="start-deep-check"), T.input(type="hidden", name="return_to", value="."), T.legend(class_="freeform-form-label" )["Run a deep-check operation (EXPENSIVE)"], T.div["Verify every bit? 
(EVEN MORE EXPENSIVE):", T.input(type="checkbox", name="verify"), ], T.div["Repair any problems?: ", T.input(type="checkbox", name="repair")], T.div["Add/renew lease on all shares?: ", T.input(type="checkbox", name="add-lease")], T.div["Emit results in JSON format?: ", T.input(type="checkbox", name="output", value="JSON")], T.input(type="hidden", name="ophandle", value=ophandle), T.input(type="submit", value="Deep-Check"), ]] return ctx.tag[deep_check] def render_deep_size_form(self, ctx, data): ophandle = base32.b2a(os.urandom(16)) deep_size = T.form( action=".", method="post", enctype="multipart/form-data")[T.fieldset[ T.input(type="hidden", name="t", value="start-deep-size"), T.legend(class_="freeform-form-label" )["Run a deep-size operation (EXPENSIVE)"], T.input(type="hidden", name="ophandle", value=ophandle), T.input(type="submit", value="Deep-Size"), ]] return ctx.tag[deep_size] def render_deep_stats_form(self, ctx, data): ophandle = base32.b2a(os.urandom(16)) deep_stats = T.form( action=".", method="post", enctype="multipart/form-data")[T.fieldset[ T.input(type="hidden", name="t", value="start-deep-stats"), T.legend(class_="freeform-form-label" )["Run a deep-stats operation (EXPENSIVE)"], T.input(type="hidden", name="ophandle", value=ophandle), T.input(type="submit", value="Deep-Stats"), ]] return ctx.tag[deep_stats] def render_manifest_form(self, ctx, data): ophandle = base32.b2a(os.urandom(16)) manifest = T.form( action=".", method="post", enctype="multipart/form-data")[T.fieldset[ T.input(type="hidden", name="t", value="start-manifest"), T.legend(class_="freeform-form-label" )["Run a manifest operation (EXPENSIVE)"], T.div["Output Format: ", T.select(name="output")[ T.option(value="html", selected="true")["HTML"], T.option(value="text")["text"], T.option(value="json")["JSON"], ], ], T.input(type="hidden", name="ophandle", value=ophandle), T.input(type="submit", value="Manifest"), ]] return ctx.tag[manifest]
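# Illustrative only: the deep-check/size/stats/manifest forms above all
# POST t=start-<operation> along with a freshly generated random ophandle,
# and results are read back later from the ophandle-based operation page
# (see the /operation/$OPHANDLE childFactory comments elsewhere in this
# module). A minimal sketch of driving one of them programmatically; the
# exact results-URL layout used below is an assumption.

def _example_start_deep_stats(obj_url, nodeurl="http://127.0.0.1:3456"):
    import os
    import urllib
    from allmydata.util import base32
    ophandle = base32.b2a(os.urandom(16))
    params = urllib.urlencode({"t": "start-deep-stats",
                               "ophandle": ophandle})
    urllib.urlopen(obj_url, params).read()  # POST starts the operation
    # poll this URL until the operation reports completion
    return "%s/operations/%s?output=JSON" % (nodeurl, ophandle)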
class DirectoryAsHTML(rend.Page):
    # The remainder of this class is to render the directory into
    # human+browser-oriented HTML.
    docFactory = getxmlfile("directory.xhtml")
    addSlash = True

    def __init__(self, node, default_mutable_format):
        rend.Page.__init__(self)
        self.node = node
        assert default_mutable_format in (MDMF_VERSION, SDMF_VERSION)
        self.default_mutable_format = default_mutable_format

    def beforeRender(self, ctx):
        # attempt to get the dirnode's children, stashing them (or the
        # failure that results) for later use
        d = self.node.list()
        def _good(children):
            # Deferreds don't optimize out tail recursion, and the way
            # Nevow's flattener handles Deferreds doesn't take this into
            # account. As a result, large lists of Deferreds that fire in the
            # same turn (i.e. the output of defer.succeed) will cause a stack
            # overflow. To work around this, we insert a turn break after
            # every 100 items, using foolscap's fireEventually(). This gives
            # the stack a chance to be popped. It would also work to put
            # every item in its own turn, but that'd be a lot more
            # inefficient. This addresses ticket #237, for which I was never
            # able to create a failing unit test.
            output = []
            for i, item in enumerate(sorted(children.items())):
                if i % 100 == 0:
                    output.append(fireEventually(item))
                else:
                    output.append(item)
            self.dirnode_children = output
            return ctx
        def _bad(f):
            text, code = humanize_failure(f)
            self.dirnode_children = None
            self.dirnode_children_error = text
            return ctx
        d.addCallbacks(_good, _bad)
        return d

    def render_title(self, ctx, data):
        si_s = abbreviated_dirnode(self.node)
        header = ["Tahoe-LAFS - Directory SI=%s" % si_s]
        if self.node.is_unknown():
            header.append(" (unknown)")
        elif not self.node.is_mutable():
            header.append(" (immutable)")
        elif self.node.is_readonly():
            header.append(" (read-only)")
        else:
            header.append(" (modifiable)")
        return ctx.tag[header]

    def render_header(self, ctx, data):
        si_s = abbreviated_dirnode(self.node)
        header = ["Tahoe-LAFS Directory SI=",
                  T.span(class_="data-chars")[si_s]]
        if self.node.is_unknown():
            header.append(" (unknown)")
        elif not self.node.is_mutable():
            header.append(" (immutable)")
        elif self.node.is_readonly():
            header.append(" (read-only)")
        return ctx.tag[header]

    def render_welcome(self, ctx, data):
        link = get_root(ctx)
        return ctx.tag[T.a(href=link)["Return to Welcome page"]]

    def render_show_readonly(self, ctx, data):
        if self.node.is_unknown() or self.node.is_readonly():
            return ""
        rocap = self.node.get_readonly_uri()
        root = get_root(ctx)
        uri_link = "%s/uri/%s/" % (root, urllib.quote(rocap))
        return ctx.tag[T.a(href=uri_link)["Read-Only Version"]]

    def render_try_children(self, ctx, data):
        # if the dirnode can be retrieved, render a table of children.
        # Otherwise, render an apologetic error message.
        if self.dirnode_children is not None:
            return ctx.tag
        else:
            return T.div[T.p["Error reading directory:"],
                         T.p[self.dirnode_children_error]]

    def data_children(self, ctx, data):
        return self.dirnode_children

    def render_row(self, ctx, data):
        name, (target, metadata) = data
        name = name.encode("utf-8")
        assert not isinstance(name, unicode)
        nameurl = urllib.quote(name, safe="") # encode any slashes too

        root = get_root(ctx)
        here = "%s/uri/%s/" % (root, urllib.quote(self.node.get_uri()))
        if self.node.is_unknown() or self.node.is_readonly():
            unlink = "-"
            rename = "-"
        else:
            # this creates a button which will cause our _POST_unlink method
            # to be invoked, which unlinks the file and then redirects the
            # browser back to this directory
            unlink = T.form(action=here, method="post")[
                T.input(type='hidden', name='t', value='unlink'),
                T.input(type='hidden', name='name', value=name),
                T.input(type='hidden', name='when_done', value="."),
                T.input(type='submit', value='unlink', name="unlink"),
                ]
            rename = T.form(action=here, method="get")[
                T.input(type='hidden', name='t', value='rename-form'),
                T.input(type='hidden', name='name', value=name),
                T.input(type='hidden', name='when_done', value="."),
                T.input(type='submit', value='rename/relink', name="rename"),
                ]

        ctx.fillSlots("unlink", unlink)
        ctx.fillSlots("rename", rename)

        times = []
        linkcrtime = metadata.get('tahoe', {}).get("linkcrtime")
        if linkcrtime is not None:
            times.append("lcr: " + time_format.iso_local(linkcrtime))
        else:
            # For backwards-compatibility with links last modified by
            # Tahoe < 1.4.0:
            if "ctime" in metadata:
                ctime = time_format.iso_local(metadata["ctime"])
                times.append("c: " + ctime)
        linkmotime = metadata.get('tahoe', {}).get("linkmotime")
        if linkmotime is not None:
            if times:
                times.append(T.br())
            times.append("lmo: " + time_format.iso_local(linkmotime))
        else:
            # For backwards-compatibility with links last modified by
            # Tahoe < 1.4.0:
            if "mtime" in metadata:
                mtime = time_format.iso_local(metadata["mtime"])
                if times:
                    times.append(T.br())
                times.append("m: " + mtime)
        ctx.fillSlots("times", times)

        assert IFilesystemNode.providedBy(target), target
        target_uri = target.get_uri() or ""
        quoted_uri = urllib.quote(target_uri, safe="") # escape slashes too

        if IMutableFileNode.providedBy(target):
            # to prevent javascript in displayed .html files from stealing a
            # secret directory URI from the URL, send the browser to a
            # URI-based page that doesn't know about the directory at all
            dlurl = "%s/file/%s/@@named=/%s" % (root, quoted_uri, nameurl)
            ctx.fillSlots("filename", T.a(href=dlurl)[name])
            ctx.fillSlots("type", "SSK")
            ctx.fillSlots("size", "?")
            info_link = "%s/uri/%s?t=info" % (root, quoted_uri)

        elif IImmutableFileNode.providedBy(target):
            dlurl = "%s/file/%s/@@named=/%s" % (root, quoted_uri, nameurl)
            ctx.fillSlots("filename", T.a(href=dlurl)[name])
            ctx.fillSlots("type", "FILE")
            ctx.fillSlots("size", target.get_size())
            info_link = "%s/uri/%s?t=info" % (root, quoted_uri)

        elif IDirectoryNode.providedBy(target):
            # directory
            uri_link = "%s/uri/%s/" % (root, urllib.quote(target_uri))
            ctx.fillSlots("filename", T.a(href=uri_link)[name])
            if not target.is_mutable():
                dirtype = "DIR-IMM"
            elif target.is_readonly():
                dirtype = "DIR-RO"
            else:
                dirtype = "DIR"
            ctx.fillSlots("type", dirtype)
            ctx.fillSlots("size", "-")
            info_link = "%s/uri/%s/?t=info" % (root, quoted_uri)

        elif isinstance(target, ProhibitedNode):
            ctx.fillSlots("filename", T.strike[name])
            if IDirectoryNode.providedBy(target.wrapped_node):
                blacklisted_type = "DIR-BLACKLISTED"
            else:
                blacklisted_type = "BLACKLISTED"
ctx.fillSlots("type", blacklisted_type) ctx.fillSlots("size", "-") info_link = None ctx.fillSlots("info", ["Access Prohibited:", T.br, target.reason]) else: # unknown ctx.fillSlots("filename", name) if target.get_write_uri() is not None: unknowntype = "?" elif not self.node.is_mutable() or target.is_alleged_immutable(): unknowntype = "?-IMM" else: unknowntype = "?-RO" ctx.fillSlots("type", unknowntype) ctx.fillSlots("size", "-") # use a directory-relative info link, so we can extract both the # writecap and the readcap info_link = "%s?t=info" % urllib.quote(name) if info_link: ctx.fillSlots("info", T.a(href=info_link)["More Info"]) return ctx.tag # XXX: similar to render_upload_form and render_mkdir_form in root.py. def render_forms(self, ctx, data): forms = [] if self.node.is_readonly(): return T.div["No upload forms: directory is read-only"] if self.dirnode_children is None: return T.div["No upload forms: directory is unreadable"] mkdir_sdmf = T.input(type='radio', name='format', value='sdmf', id='mkdir-sdmf', checked='checked') mkdir_mdmf = T.input(type='radio', name='format', value='mdmf', id='mkdir-mdmf') mkdir_form = T.form(action=".", method="post", enctype="multipart/form-data")[ T.fieldset[ T.input(type="hidden", name="t", value="mkdir"), T.input(type="hidden", name="when_done", value="."), T.legend(class_="freeform-form-label")["Create a new directory in this directory"], "New directory name:"+SPACE, T.input(type="text", name="name"), SPACE, T.input(type="submit", value="Create"), SPACE*2, mkdir_sdmf, T.label(for_='mutable-directory-sdmf')[" SDMF"], SPACE, mkdir_mdmf, T.label(for_='mutable-directory-mdmf')[" MDMF (experimental)"], ]] forms.append(T.div(class_="freeform-form")[mkdir_form]) upload_chk = T.input(type='radio', name='format', value='chk', id='upload-chk', checked='checked') upload_sdmf = T.input(type='radio', name='format', value='sdmf', id='upload-sdmf') upload_mdmf = T.input(type='radio', name='format', value='mdmf', id='upload-mdmf') upload_form = T.form(action=".", method="post", enctype="multipart/form-data")[ T.fieldset[ T.input(type="hidden", name="t", value="upload"), T.input(type="hidden", name="when_done", value="."), T.legend(class_="freeform-form-label")["Upload a file to this directory"], "Choose a file to upload:"+SPACE, T.input(type="file", name="file", class_="freeform-input-file"), SPACE, T.input(type="submit", value="Upload"), SPACE*2, upload_chk, T.label(for_="upload-chk") [" Immutable"], SPACE, upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"], SPACE, upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"], ]] forms.append(T.div(class_="freeform-form")[upload_form]) attach_form = T.form(action=".", method="post", enctype="multipart/form-data")[ T.fieldset[ T.input(type="hidden", name="t", value="uri"), T.input(type="hidden", name="when_done", value="."), T.legend(class_="freeform-form-label")["Add a link to a file or directory which is already in Tahoe-LAFS."], "New child name:"+SPACE, T.input(type="text", name="name"), SPACE*2, "URI of new child:"+SPACE, T.input(type="text", name="uri"), SPACE, T.input(type="submit", value="Attach"), ]] forms.append(T.div(class_="freeform-form")[attach_form]) return forms def render_results(self, ctx, data): req = IRequest(ctx) return get_arg(req, "results", "")
class Root(rend.Page):

    addSlash = True
    docFactory = getxmlfile("welcome.xhtml")

    def __init__(self, client, clock=None):
        rend.Page.__init__(self, client)
        self.client = client
        # If set, clock is a twisted.internet.task.Clock that the tests
        # use to test ophandle expiration.
        self.child_operations = operations.OphandleTable(clock)
        try:
            s = client.getServiceNamed("storage")
        except KeyError:
            s = None
        self.child_storage = storage.StorageStatus(s, self.client.nickname)

        self.child_uri = URIHandler(client)
        self.child_cap = URIHandler(client)

        self.child_file = FileHandler(client)
        self.child_named = FileHandler(client)
        self.child_status = status.Status(client.get_history())
        self.child_statistics = status.Statistics(client.stats_provider)
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

    def child_helper_status(self, ctx):
        # the Helper isn't attached until after the Tub starts, so this child
        # needs to be created on each request
        return status.HelperStatus(self.client.helper)

    child_report_incident = IncidentReporter()
    #child_server # let's reserve this for storage-server-over-HTTP

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_version(self, ctx, data):
        return get_package_versions_string()
    def data_import_path(self, ctx, data):
        return str(allmydata)
    def data_my_nodeid(self, ctx, data):
        return idlib.nodeid_b2a(self.client.nodeid)
    def data_my_nickname(self, ctx, data):
        return self.client.nickname

    def render_services(self, ctx, data):
        ul = T.ul()
        try:
            ss = self.client.getServiceNamed("storage")
            stats = ss.get_stats()
            if stats["storage_server.accepting_immutable_shares"]:
                msg = "accepting new shares"
            else:
                msg = "not accepting new shares (read-only)"
            available = stats.get("storage_server.disk_avail")
            if available is not None:
                msg += ", %s available" % abbreviate_size(available)
            ul[T.li[T.a(href="storage")["Storage Server"], ": ", msg]]
        except KeyError:
            ul[T.li["Not running storage server"]]

        if self.client.helper:
            stats = self.client.helper.get_stats()
            active_uploads = stats["chk_upload_helper.active_uploads"]
            ul[T.li["Helper: %d active uploads" % (active_uploads,)]]
        else:
            ul[T.li["Not running helper"]]

        return ctx.tag[ul]

    def data_introducer_furl(self, ctx, data):
        return self.client.introducer_furl
    def data_connected_to_introducer(self, ctx, data):
        if self.client.connected_to_introducer():
            return "yes"
        return "no"

    def data_helper_furl(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return None
        furl, connected = uploader.get_helper_info()
        return furl
    def data_connected_to_helper_description(self, ctx, data):
        return self.data_connected_to_helper(ctx, data).replace('-', ' ')
    def data_connected_to_helper(self, ctx, data):
        try:
            uploader = self.client.getServiceNamed("uploader")
        except KeyError:
            return "no" # we don't even have an Uploader
        furl, connected = uploader.get_helper_info()
        if furl is None:
            return "not-configured"
        if connected:
            return "yes"
        return "no"

    def data_known_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_all_serverids())

    def data_connected_storage_servers(self, ctx, data):
        sb = self.client.get_storage_broker()
        return len(sb.get_connected_servers())

    def data_services(self, ctx, data):
        sb = self.client.get_storage_broker()
        return sorted(sb.get_known_servers(), key=lambda s: s.get_serverid())

    def render_service_row(self, ctx, server):
        nodeid = server.get_serverid()
ctx.fillSlots("peerid", server.get_longname()) ctx.fillSlots("nickname", server.get_nickname()) rhost = server.get_remote_host() if rhost: if nodeid == self.client.nodeid: rhost_s = "(loopback)" elif isinstance(rhost, address.IPv4Address): rhost_s = "%s:%d" % (rhost.host, rhost.port) else: rhost_s = str(rhost) connected = "Yes: to " + rhost_s since = server.get_last_connect_time() else: connected = "No" since = server.get_last_loss_time() announced = server.get_announcement_time() announcement = server.get_announcement() version = announcement["my-version"] service_name = announcement["service-name"] TIME_FORMAT = "%H:%M:%S %d-%b-%Y" ctx.fillSlots("connected", connected) ctx.fillSlots("connected-bool", bool(rhost)) ctx.fillSlots("since", time.strftime(TIME_FORMAT, time.localtime(since))) ctx.fillSlots("announced", time.strftime(TIME_FORMAT, time.localtime(announced))) ctx.fillSlots("version", version) ctx.fillSlots("service_name", service_name) return ctx.tag def render_download_form(self, ctx, data): # this is a form where users can download files by URI form = T.form( action="uri", method="get", enctype="multipart/form-data")[T.fieldset[ T.legend(class_="freeform-form-label")["Download a file"], T.div["Tahoe-URI to download:" + SPACE, T.input(type="text", name="uri")], T.div["Filename to download as:" + SPACE, T.input(type="text", name="filename")], T.input(type="submit", value="Download!"), ]] return T.div[form] def render_view_form(self, ctx, data): # this is a form where users can download files by URI, or jump to a # named directory form = T.form( action="uri", method="get", enctype="multipart/form-data")[ T.fieldset[T.legend( class_="freeform-form-label")["View a file or directory"], "Tahoe-URI to view:" + SPACE, T.input(type="text", name="uri"), SPACE * 2, T.input(type="submit", value="View!"), ]] return T.div[form] def render_upload_form(self, ctx, data): # This is a form where users can upload unlinked files. # Users can choose immutable, SDMF, or MDMF from a radio button. upload_chk = T.input(type='radio', name='format', value='chk', id='upload-chk', checked='checked') upload_sdmf = T.input(type='radio', name='format', value='sdmf', id='upload-sdmf') upload_mdmf = T.input(type='radio', name='format', value='mdmf', id='upload-mdmf') form = T.form( action="uri", method="post", enctype="multipart/form-data" )[T.fieldset[T.legend( class_="freeform-form-label")["Upload a file"], T.div[ "Choose a file:" + SPACE, T.input(type="file", name="file", class_="freeform-input-file" )], T.input(type="hidden", name="t", value="upload"), T.div[upload_chk, T.label(for_="upload-chk")[" Immutable"], SPACE, upload_sdmf, T.label(for_="upload-sdmf")[" SDMF"], SPACE, upload_mdmf, T.label(for_="upload-mdmf")[" MDMF (experimental)"], SPACE * 2, T.input(type="submit", value="Upload!")], ]] return T.div[form] def render_mkdir_form(self, ctx, data): # This is a form where users can create new directories. # Users can choose SDMF or MDMF from a radio button. 
        mkdir_sdmf = T.input(type='radio', name='format',
                             value='sdmf', id='mkdir-sdmf',
                             checked='checked')
        mkdir_mdmf = T.input(type='radio', name='format',
                             value='mdmf', id='mkdir-mdmf')

        form = T.form(action="uri", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
            T.legend(class_="freeform-form-label")["Create a directory"],
            mkdir_sdmf, T.label(for_='mkdir-sdmf')[" SDMF"], SPACE,
            mkdir_mdmf, T.label(for_='mkdir-mdmf')[" MDMF (experimental)"],
            SPACE*2,
            T.input(type="hidden", name="t", value="mkdir"),
            T.input(type="hidden", name="redirect_to_result", value="true"),
            T.input(type="submit", value="Create a directory"),
            ]]
        return T.div[form]

    def render_incident_button(self, ctx, data):
        # this button triggers a foolscap-logging "incident"
        form = T.form(action="report_incident", method="post",
                      enctype="multipart/form-data")[
            T.fieldset[
            T.legend(class_="freeform-form-label")["Report an Incident"],
            T.input(type="hidden", name="t", value="report-incident"),
            "What went wrong?:"+SPACE,
            T.input(type="text", name="details"), SPACE,
            T.input(type="submit", value="Report!"),
            ]]
        return T.div[form]
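
# A minimal sketch of fetching the machine-readable helper view that
# Root.child_helper_status serves (Nevow maps child_helper_status onto the
# /helper_status path). The base URL and port are assumptions; use whatever
# webport the node is configured with.
import json, urllib2

def get_helper_status(base_url="http://127.0.0.1:3456"):
    return json.loads(urllib2.urlopen(base_url + "/helper_status?t=json").read())
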
class IntroducerRoot(rend.Page):

    addSlash = True
    docFactory = getxmlfile("introducer.xhtml")

    child_operations = None

    def __init__(self, introducer_node):
        self.introducer_node = introducer_node
        self.introducer_service = introducer_node.getServiceNamed("introducer")
        rend.Page.__init__(self, introducer_node)
        static_dir = resource_filename("allmydata.web", "static")
        for filen in os.listdir(static_dir):
            self.putChild(filen, nevow_File(os.path.join(static_dir, filen)))

    def renderHTTP(self, ctx):
        t = get_arg(inevow.IRequest(ctx), "t")
        if t == "json":
            return self.render_JSON(ctx)
        return rend.Page.renderHTTP(self, ctx)

    def render_JSON(self, ctx):
        res = {}

        counts = {}
        for s in self.introducer_service.get_subscribers():
            if s.service_name not in counts:
                counts[s.service_name] = 0
            counts[s.service_name] += 1
        res["subscription_summary"] = counts

        announcement_summary = {}
        for ad in self.introducer_service.get_announcements():
            service_name = ad.service_name
            if service_name not in announcement_summary:
                announcement_summary[service_name] = 0
            announcement_summary[service_name] += 1
        res["announcement_summary"] = announcement_summary

        return simplejson.dumps(res, indent=1) + "\n"

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_rendered_at(self, ctx, data):
        return time.strftime(TIME_FORMAT, time.localtime())
    def data_version(self, ctx, data):
        return get_package_versions_string()
    def data_import_path(self, ctx, data):
        return str(allmydata).replace("/", "/ ") # XXX kludge for wrapping
    def data_my_nodeid(self, ctx, data):
        return idlib.nodeid_b2a(self.introducer_node.nodeid)

    def render_announcement_summary(self, ctx, data):
        services = {}
        for ad in self.introducer_service.get_announcements():
            if ad.service_name not in services:
                services[ad.service_name] = 0
            services[ad.service_name] += 1
        service_names = services.keys()
        service_names.sort()
        return ", ".join(["%s: %d" % (service_name, services[service_name])
                          for service_name in service_names])

    def render_client_summary(self, ctx, data):
        counts = {}
        for s in self.introducer_service.get_subscribers():
            if s.service_name not in counts:
                counts[s.service_name] = 0
            counts[s.service_name] += 1
        return ", ".join(["%s: %d" % (name, counts[name])
                          for name in sorted(counts.keys())])

    def data_services(self, ctx, data):
        services = self.introducer_service.get_announcements(False)
        services.sort(key=lambda ad: (ad.service_name, ad.nickname))
        return services

    def render_service_row(self, ctx, ad):
        ctx.fillSlots("serverid", ad.serverid)
        ctx.fillSlots("nickname", ad.nickname)
        ctx.fillSlots("connection-hints",
                      "connection hints: " + " ".join(ad.connection_hints))
        ctx.fillSlots("connected", "?")
        when_s = time.strftime("%H:%M:%S %d-%b-%Y", time.localtime(ad.when))
        ctx.fillSlots("announced", when_s)
        ctx.fillSlots("version", ad.version)
        ctx.fillSlots("service_name", ad.service_name)
        return ctx.tag

    def data_subscribers(self, ctx, data):
        return self.introducer_service.get_subscribers()

    def render_subscriber_row(self, ctx, s):
        ctx.fillSlots("nickname", s.nickname)
        ctx.fillSlots("tubid", s.tubid)
        ctx.fillSlots("connected", s.remote_address)
        since_s = time.strftime("%H:%M:%S %d-%b-%Y", time.localtime(s.when))
        ctx.fillSlots("since", since_s)
        ctx.fillSlots("version", s.version)
        ctx.fillSlots("service_name", s.service_name)
        return ctx.tag
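
# For reference, IntroducerRoot.render_JSON above emits a document shaped
# like this (the service names and counts are illustrative, not real output):
#
#   {
#    "subscription_summary": {"storage": 5},
#    "announcement_summary": {"storage": 10}
#   }
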
    def test_load_file(self):
        # This will raise an exception unless a well-formed XML file is
        # found under that name.
        common.getxmlfile('directory.xhtml').load()
class Statistics(MultiFormatPage):
    docFactory = getxmlfile("statistics.xhtml")

    def __init__(self, provider):
        rend.Page.__init__(self, provider)
        self.provider = provider

    def render_JSON(self, req):
        stats = self.provider.get_stats()
        req.setHeader("content-type", "text/plain")
        return json.dumps(stats, indent=1) + "\n"

    def data_get_stats(self, ctx, data):
        return self.provider.get_stats()

    def render_load_average(self, ctx, data):
        return str(data["stats"].get("load_monitor.avg_load"))

    def render_peak_load(self, ctx, data):
        return str(data["stats"].get("load_monitor.max_load"))

    def render_uploads(self, ctx, data):
        files = data["counters"].get("uploader.files_uploaded", 0)
        bytes = data["counters"].get("uploader.bytes_uploaded", 0)
        return ("%s files / %s bytes (%s)" %
                (files, bytes, abbreviate_size(bytes)))

    def render_downloads(self, ctx, data):
        files = data["counters"].get("downloader.files_downloaded", 0)
        bytes = data["counters"].get("downloader.bytes_downloaded", 0)
        return ("%s files / %s bytes (%s)" %
                (files, bytes, abbreviate_size(bytes)))

    def render_publishes(self, ctx, data):
        files = data["counters"].get("mutable.files_published", 0)
        bytes = data["counters"].get("mutable.bytes_published", 0)
        return "%s files / %s bytes (%s)" % (files, bytes,
                                             abbreviate_size(bytes))

    def render_retrieves(self, ctx, data):
        files = data["counters"].get("mutable.files_retrieved", 0)
        bytes = data["counters"].get("mutable.bytes_retrieved", 0)
        return "%s files / %s bytes (%s)" % (files, bytes,
                                             abbreviate_size(bytes))

    def render_magic_uploader_monitored(self, ctx, data):
        dirs = data["counters"].get("magic_folder.uploader.dirs_monitored", 0)
        return "%s directories" % (dirs,)

    def render_magic_uploader_succeeded(self, ctx, data):
        # TODO: bytes uploaded
        files = data["counters"].get("magic_folder.uploader.objects_succeeded", 0)
        return "%s files" % (files,)

    def render_magic_uploader_queued(self, ctx, data):
        files = data["counters"].get("magic_folder.uploader.objects_queued", 0)
        return "%s files" % (files,)

    def render_magic_uploader_failed(self, ctx, data):
        files = data["counters"].get("magic_folder.uploader.objects_failed", 0)
        return "%s files" % (files,)

    def render_magic_downloader_succeeded(self, ctx, data):
        # TODO: bytes downloaded
        files = data["counters"].get(
            "magic_folder.downloader.objects_succeeded", 0)
        return "%s files" % (files,)

    def render_magic_downloader_queued(self, ctx, data):
        files = data["counters"].get("magic_folder.downloader.objects_queued", 0)
        return "%s files" % (files,)

    def render_magic_downloader_failed(self, ctx, data):
        files = data["counters"].get("magic_folder.downloader.objects_failed", 0)
        return "%s files" % (files,)

    def render_raw(self, ctx, data):
        raw = pprint.pformat(data)
        return ctx.tag[raw]
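
# A minimal sketch of consuming the JSON view rendered above, assuming the
# ?t=json dispatch that MultiFormatPage provides and a local node webport
# (the base URL is a hypothetical placeholder). get_stats() returns a dict
# with "counters" and "stats" sub-dicts, which is what the render_* methods
# above index into.
import json, urllib2

def get_upload_counters(base_url="http://127.0.0.1:3456"):
    stats = json.loads(urllib2.urlopen(base_url + "/statistics?t=json").read())
    counters = stats.get("counters", {})
    return (counters.get("uploader.files_uploaded", 0),
            counters.get("uploader.bytes_uploaded", 0))
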
class IntroducerRoot(rend.Page):

    addSlash = True
    docFactory = getxmlfile("introducer.xhtml")

    child_operations = None

    def __init__(self, introducer_node):
        self.introducer_node = introducer_node
        self.introducer_service = introducer_node.getServiceNamed("introducer")
        rend.Page.__init__(self, introducer_node)

    def renderHTTP(self, ctx):
        t = get_arg(inevow.IRequest(ctx), "t")
        if t == "json":
            return self.render_JSON(ctx)
        return rend.Page.renderHTTP(self, ctx)

    def render_JSON(self, ctx):
        res = {}
        clients = self.introducer_service.get_subscribers()
        subscription_summary = dict([ (name, len(clients[name]))
                                      for name in clients ])
        res["subscription_summary"] = subscription_summary

        announcement_summary = {}
        service_hosts = {}
        for (ann,when) in self.introducer_service.get_announcements().values():
            (furl, service_name, ri_name, nickname, ver, oldest) = ann
            if service_name not in announcement_summary:
                announcement_summary[service_name] = 0
            announcement_summary[service_name] += 1
            if service_name not in service_hosts:
                service_hosts[service_name] = set()
            # it's nice to know how many distinct hosts are available for
            # each service. We define a "host" by a set of addresses
            # (hostnames or ipv4 addresses), which we extract from the
            # connection hints. In practice, this is usually close
            # enough: when multiple services are run on a single host,
            # they're usually either configured with the same addresses,
            # or setLocationAutomatically picks up the same interfaces.
            locations = SturdyRef(furl).getTubRef().getLocations()
            # list of tuples, ("ipv4", host, port)
            host = frozenset([hint[1]
                              for hint in locations
                              if hint[0] == "ipv4"])
            service_hosts[service_name].add(host)
        res["announcement_summary"] = announcement_summary
        distinct_hosts = dict([(name, len(hosts))
                               for (name, hosts)
                               in service_hosts.iteritems()])
        res["announcement_distinct_hosts"] = distinct_hosts

        return simplejson.dumps(res, indent=1) + "\n"

    # FIXME: This code is duplicated in root.py and introweb.py.
    def data_version(self, ctx, data):
        return get_package_versions_string()
    def data_import_path(self, ctx, data):
        return str(allmydata).replace("/", "/ ") # XXX kludge for wrapping
    def data_my_nodeid(self, ctx, data):
        return idlib.nodeid_b2a(self.introducer_node.nodeid)

    def render_announcement_summary(self, ctx, data):
        services = {}
        for (ann,when) in self.introducer_service.get_announcements().values():
            (furl, service_name, ri_name, nickname, ver, oldest) = ann
            if service_name not in services:
                services[service_name] = 0
            services[service_name] += 1
        service_names = services.keys()
        service_names.sort()
        return ", ".join(["%s: %d" % (service_name, services[service_name])
                          for service_name in service_names])

    def render_client_summary(self, ctx, data):
        clients = self.introducer_service.get_subscribers()
        service_names = clients.keys()
        service_names.sort()
        return ", ".join(["%s: %d" % (service_name,
                                      len(clients[service_name]))
                          for service_name in service_names])

    def data_services(self, ctx, data):
        introsvc = self.introducer_service
        ann = [(since,a)
               for (a,since) in introsvc.get_announcements().values()
               if a[1] != "stub_client"]
        ann.sort(lambda a,b: cmp( (a[1][1], a), (b[1][1], b) ))
        return ann

    def render_service_row(self, ctx, (since,announcement)):
        (furl, service_name, ri_name, nickname, ver, oldest) = announcement
        sr = SturdyRef(furl)
        nodeid = sr.tubID
        advertised = self.show_location_hints(sr)
        ctx.fillSlots("peerid", "%s %s" % (nodeid, nickname))
        ctx.fillSlots("advertised", " ".join(advertised))
        ctx.fillSlots("connected", "?")
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        ctx.fillSlots("announced", time.strftime(TIME_FORMAT,
                                                 time.localtime(since)))
        ctx.fillSlots("version", ver)
        ctx.fillSlots("service_name", service_name)
        return ctx.tag
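
# A standalone sketch of the distinct-host grouping that render_JSON above
# performs, assuming location hints shaped like ("ipv4", host, port) as the
# inline comment describes; the function name and sample hints are made up.
# Two announcements that advertise the same address set collapse into a
# single host.
def count_distinct_hosts(hints_per_announcement):
    hosts = set()
    for hints in hints_per_announcement:
        # each announcement contributes one frozenset of ipv4 addresses
        hosts.add(frozenset([h[1] for h in hints if h[0] == "ipv4"]))
    return len(hosts)

# count_distinct_hosts([[("ipv4", "10.0.0.1", 1234)],
#                       [("ipv4", "10.0.0.1", 5678)]]) == 1
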
class RetrieveStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("retrieve-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.retrieve_status = data

    def render_started(self, ctx, data):
        started_s = render_time(data.get_started())
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_problems(self, ctx, data):
        problems = data.get_problems()
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        file_size = self.retrieve_status.get_size()
        duration = self.retrieve_status.timings.get(name)
        return compute_rate(file_size, duration)

    def data_time_total(self, ctx, data):
        return self.retrieve_status.timings.get("total")
    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_fetch(self, ctx, data):
        return self.retrieve_status.timings.get("fetch")
    def data_rate_fetch(self, ctx, data):
        return self._get_rate(data, "fetch")

    def data_time_decode(self, ctx, data):
        return self.retrieve_status.timings.get("decode")
    def data_rate_decode(self, ctx, data):
        return self._get_rate(data, "decode")

    def data_time_decrypt(self, ctx, data):
        return self.retrieve_status.timings.get("decrypt")
    def data_rate_decrypt(self, ctx, data):
        return self._get_rate(data, "decrypt")

    def render_server_timings(self, ctx, data):
        per_server = self.retrieve_status.timings.get("fetch_per_server")
        if not per_server:
            return ""
        l = T.ul()
        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
            times_s = ", ".join([self.render_time(None, t)
                                 for t in per_server[server]])
            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
        return T.li["Per-Server Fetch Response Times: ", l]
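
# compute_rate() used above is imported from elsewhere in the tree (not shown
# in this excerpt). A minimal stand-in with the behavior _get_rate() relies
# on, returning None whenever the size or duration is unknown or the duration
# is zero, would look like this sketch (named distinctly to avoid shadowing
# the real import):
def _compute_rate_sketch(bytes, seconds):
    # bytes per second, or None when the rate cannot be computed
    if bytes is None or seconds is None or seconds == 0:
        return None
    return 1.0 * bytes / seconds
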
class Status(rend.Page):
    docFactory = getxmlfile("status.xhtml")
    addSlash = True

    def __init__(self, history):
        rend.Page.__init__(self, history)
        self.history = history

    def renderHTTP(self, ctx):
        req = inevow.IRequest(ctx)
        t = get_arg(req, "t")
        if t == "json":
            return self.json(req)
        return rend.Page.renderHTTP(self, ctx)

    def json(self, req):
        req.setHeader("content-type", "text/plain")
        data = {}
        data["active"] = active = []
        for s in self._get_active_operations():
            si_s = base32.b2a_or_none(s.get_storage_index())
            size = s.get_size()
            status = s.get_status()
            if IUploadStatus.providedBy(s):
                h, c, e = s.get_progress()
                active.append({"type": "upload",
                               "storage-index-string": si_s,
                               "total-size": size,
                               "status": status,
                               "progress-hash": h,
                               "progress-ciphertext": c,
                               "progress-encode-push": e,
                               })
            elif IDownloadStatus.providedBy(s):
                active.append({"type": "download",
                               "storage-index-string": si_s,
                               "total-size": size,
                               "status": status,
                               "progress": s.get_progress(),
                               })
        return simplejson.dumps(data, indent=1) + "\n"

    def _get_all_statuses(self):
        h = self.history
        return itertools.chain(h.list_all_upload_statuses(),
                               h.list_all_download_statuses(),
                               h.list_all_mapupdate_statuses(),
                               h.list_all_publish_statuses(),
                               h.list_all_retrieve_statuses(),
                               h.list_all_helper_statuses(),
                               )

    def data_active_operations(self, ctx, data):
        return self._get_active_operations()

    def _get_active_operations(self):
        active = [s
                  for s in self._get_all_statuses()
                  if s.get_active()]
        return active

    def data_recent_operations(self, ctx, data):
        return self._get_recent_operations()

    def _get_recent_operations(self):
        recent = [s
                  for s in self._get_all_statuses()
                  if not s.get_active()]
        recent.sort(lambda a, b: cmp(a.get_started(), b.get_started()))
        recent.reverse()
        return recent

    def render_row(self, ctx, data):
        s = data

        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(s.get_started()))
        ctx.fillSlots("started", started_s)

        si_s = base32.b2a_or_none(s.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        ctx.fillSlots("si", si_s)
        ctx.fillSlots("helper", {True: "Yes",
                                 False: "No"}[s.using_helper()])

        size = s.get_size()
        if size is None:
            size = "(unknown)"
        elif isinstance(size, (int, long, float)):
            size = abbreviate_size(size)
        ctx.fillSlots("total_size", size)

        progress = data.get_progress()
        if IUploadStatus.providedBy(data):
            link = "up-%d" % data.get_counter()
            ctx.fillSlots("type", "upload")
            # TODO: make an ascii-art bar
            (chk, ciphertext, encandpush) = progress
            progress_s = ("hash: %.1f%%, ciphertext: %.1f%%, encode: %.1f%%" %
                          ((100.0 * chk),
                           (100.0 * ciphertext),
                           (100.0 * encandpush)))
            ctx.fillSlots("progress", progress_s)
        elif IDownloadStatus.providedBy(data):
            link = "down-%d" % data.get_counter()
            ctx.fillSlots("type", "download")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IPublishStatus.providedBy(data):
            link = "publish-%d" % data.get_counter()
            ctx.fillSlots("type", "publish")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IRetrieveStatus.providedBy(data):
            ctx.fillSlots("type", "retrieve")
            link = "retrieve-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        else:
            assert IServermapUpdaterStatus.providedBy(data)
            ctx.fillSlots("type", "mapupdate %s" % data.get_mode())
            link = "mapupdate-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        ctx.fillSlots("status", T.a(href=link)[s.get_status()])
        return ctx.tag

    def childFactory(self, ctx, name):
        h = self.history
        stype, count_s = name.split("-")
        count = int(count_s)
        if stype == "up":
"up": for s in itertools.chain(h.list_all_upload_statuses(), h.list_all_helper_statuses()): # immutable-upload helpers use the same status object as a # regular immutable-upload if s.get_counter() == count: return UploadStatusPage(s) if stype == "down": for s in h.list_all_download_statuses(): if s.get_counter() == count: return DownloadStatusPage(s) if stype == "mapupdate": for s in h.list_all_mapupdate_statuses(): if s.get_counter() == count: return MapupdateStatusPage(s) if stype == "publish": for s in h.list_all_publish_statuses(): if s.get_counter() == count: return PublishStatusPage(s) if stype == "retrieve": for s in h.list_all_retrieve_statuses(): if s.get_counter() == count: return RetrieveStatusPage(s)
class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
    docFactory = getxmlfile("map-update-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.update_status = data

    def render_started(self, ctx, data):
        started_s = render_time(data.get_started())
        return started_s

    def render_finished(self, ctx, data):
        when = data.get_finished()
        if not when:
            return "not yet"
        finished_s = render_time(data.get_finished())
        return finished_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def render_privkey_from(self, ctx, data):
        server = data.get_privkey_from()
        if server:
            return ctx.tag["Got privkey from: [%s]" % server.get_name()]
        else:
            return ""

    def data_time_total(self, ctx, data):
        return self.update_status.timings.get("total")

    def data_time_initial_queries(self, ctx, data):
        return self.update_status.timings.get("initial_queries")

    def data_time_cumulative_verify(self, ctx, data):
        return self.update_status.timings.get("cumulative_verify")

    def render_server_timings(self, ctx, data):
        per_server = self.update_status.timings.get("per_server")
        if not per_server:
            return ""
        l = T.ul()
        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
            times = []
            for op, started, t in per_server[server]:
                #times.append("%s/%.4fs/%s/%s" % (op,
                #                                 started,
                #                                 self.render_time(None, started - self.update_status.get_started()),
                #                                 self.render_time(None, t)))
                if op == "query":
                    times.append(self.render_time(None, t))
                elif op == "late":
                    times.append("late(" + self.render_time(None, t) + ")")
                else:
                    times.append("privkey(" + self.render_time(None, t) + ")")
            times_s = ", ".join(times)
            l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
        return T.li["Per-Server Response Times: ", l]
class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
    docFactory = getxmlfile("download-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.download_status = data
        self.putChild("event_json", _EventJson(self.download_status))

    def download_results(self):
        return defer.maybeDeferred(self.download_status.get_results)

    def relative_time(self, t):
        if t is None:
            return t
        if self.download_status.first_timestamp is not None:
            return t - self.download_status.first_timestamp
        return t

    def short_relative_time(self, t):
        t = self.relative_time(t)
        if t is None:
            return ""
        return "+%.6fs" % t

    def render_timeline_link(self, ctx, data):
        from nevow import url
        return T.a(href=url.URL.fromContext(ctx).child("timeline"))["timeline"]

    def _rate_and_time(self, bytes, seconds):
        time_s = self.render_time(None, seconds)
        if seconds != 0:
            rate = self.render_rate(None, 1.0 * bytes / seconds)
            return T.span(title=rate)[time_s]
        return T.span[time_s]

    def render_events(self, ctx, data):
        if not self.download_status.storage_index:
            return
        srt = self.short_relative_time
        l = T.div()

        t = T.table(align="left", class_="status-download-events")
        t[T.tr[T.th["serverid"], T.th["sent"], T.th["received"],
               T.th["shnums"], T.th["RTT"]]]
        for d_ev in self.download_status.dyhb_requests:
            server = d_ev["server"]
            sent = d_ev["start_time"]
            shnums = d_ev["response_shnums"]
            received = d_ev["finish_time"]
            rtt = None
            if received is not None:
                rtt = received - sent
            if not shnums:
                shnums = ["-"]
            t[T.tr(style="background: %s" % _color(server))[
                [T.td[server.get_name()], T.td[srt(sent)],
                 T.td[srt(received)],
                 T.td[",".join([str(shnum) for shnum in shnums])],
                 T.td[self.render_time(None, rtt)],
                 ]]]
        l[T.h2["DYHB Requests:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left", class_="status-download-events")
        t[T.tr[T.th["range"],
               T.th["start"], T.th["finish"], T.th["got"],
               T.th["time"], T.th["decrypttime"], T.th["pausedtime"],
               T.th["speed"]]]
        for r_ev in self.download_status.read_events:
            start = r_ev["start"]
            length = r_ev["length"]
            bytes = r_ev["bytes_returned"]
            decrypt_time = ""
            if bytes:
                decrypt_time = self._rate_and_time(bytes, r_ev["decrypt_time"])
            speed, rtt = "", ""
            if r_ev["finish_time"] is not None:
                rtt = r_ev["finish_time"] - r_ev["start_time"] - r_ev["paused_time"]
                speed = self.render_rate(None, compute_rate(bytes, rtt))
                rtt = self.render_time(None, rtt)
            paused = self.render_time(None, r_ev["paused_time"])
            t[T.tr[T.td["[%d:+%d]" % (start, length)],
                   T.td[srt(r_ev["start_time"])],
                   T.td[srt(r_ev["finish_time"])],
                   T.td[bytes], T.td[rtt],
                   T.td[decrypt_time], T.td[paused],
                   T.td[speed],
                   ]]
        l[T.h2["Read Events:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left", class_="status-download-events")
        t[T.tr[T.th["segnum"], T.th["start"], T.th["active"], T.th["finish"],
               T.th["range"],
               T.th["decodetime"], T.th["segtime"], T.th["speed"]]]
        for s_ev in self.download_status.segment_events:
            range_s = "-"
            segtime_s = "-"
            speed = "-"
            decode_time = "-"
            if s_ev["finish_time"] is not None:
                if s_ev["success"]:
                    segtime = s_ev["finish_time"] - s_ev["active_time"]
                    segtime_s = self.render_time(None, segtime)
                    seglen = s_ev["segment_length"]
                    range_s = "[%d:+%d]" % (s_ev["segment_start"], seglen)
                    speed = self.render_rate(None, compute_rate(seglen, segtime))
                    decode_time = self._rate_and_time(seglen, s_ev["decode_time"])
                else:
                    # error
                    range_s = "error"
            else:
                # not finished yet
                pass
            t[T.tr[T.td["seg%d" % s_ev["segment_number"]],
                   T.td[srt(s_ev["start_time"])],
                   T.td[srt(s_ev["active_time"])],
                   T.td[srt(s_ev["finish_time"])],
                   T.td[range_s],
                   T.td[decode_time], T.td[segtime_s], T.td[speed]]]
        l[T.h2["Segment Events:"], t]
        l[T.br(clear="all")]

        t = T.table(align="left", class_="status-download-events")
        t[T.tr[T.th["serverid"], T.th["shnum"], T.th["range"],
               T.th["txtime"], T.th["rxtime"],
               T.th["received"], T.th["RTT"]]]
        for r_ev in self.download_status.block_requests:
            server = r_ev["server"]
            rtt = None
            if r_ev["finish_time"] is not None:
                rtt = r_ev["finish_time"] - r_ev["start_time"]
            color = _color(server)
            t[T.tr(style="background: %s" % color)[
                T.td[server.get_name()], T.td[r_ev["shnum"]],
                T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])],
                T.td[srt(r_ev["start_time"])],
                T.td[srt(r_ev["finish_time"])],
                T.td[r_ev["response_length"] or ""],
                T.td[self.render_time(None, rtt)],
                ]]
        l[T.h2["Requests:"], t]
        l[T.br(clear="all")]

        return l

    def render_results(self, ctx, data):
        d = self.download_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        started_s = render_time(data.get_started())
        return started_s + " (%s)" % data.get_started()

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            return "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()