def get_rst(self):
    """
    Render the directive content as a reST table.

    Cells are separated by ``<NEXTCELL>`` and rows by ``<NEXTROW>``
    (both may be overridden via the first and second directive
    arguments).  With the ``header`` option, the first row becomes the
    table header; otherwise a headerless table is produced.

    Raises :class:`ValueError` if the rows do not all have the same
    number of cells.
    """
    cellsep = '<NEXTCELL>'
    rowsep = '<NEXTROW>'
    if len(self.arguments) > 0:
        cellsep = self.arguments[0]
    if len(self.arguments) > 1:
        rowsep = self.arguments[1]
    content = '\n'.join(self.content)
    rows = []
    colcount = None
    for row in content.split(rowsep):
        cells = [cell.strip() for cell in row.split(cellsep)]
        if colcount is None:
            # First row fixes the expected column count.
            colcount = len(cells)
        elif colcount != len(cells):
            # Was a bare `assert` before: silently disabled under -O and
            # gave no diagnostic.  Fail loudly with context instead.
            raise ValueError(
                "Inconsistent row length: expected {} cells but got {} "
                "in {!r}".format(colcount, len(cells), row))
        rows.append(cells)
    if 'header' in self.options:
        return rstgen.table(rows[0], rows[1:])
    return rstgen.table([""] * colcount, rows, show_headers=False)
def make_pod_timing_table(self, f):
    """
    Write a reST table of average per-container timings to *f*.

    Pods whose containers all have more than one sample get one row,
    ordered by the time of their slowest average stage; rows are
    right-justified to the widest pod.
    """
    # Average each container's samples; drop pods that have any
    # container with fewer than two samples.
    averages = {}
    for pod_name, containers in self.pod_timings.items():
        if all(len(samples) > 1 for samples in containers.values()):
            averages[pod_name] = {
                name: sum(samples) / len(samples)
                for name, samples in containers.items()
            }
    if not averages:
        return

    stage_count = max(len(conts) for conts in averages.values())
    header = ["**Pod**"] + [""] * stage_count

    # Pods ordered by their last (largest) average finish time.
    ordered = sorted(averages.items(), key=lambda item: max(item[1].values()))

    rows = []
    for pod_name, cont_avgs in ordered:
        padding = [""] * (stage_count - len(cont_avgs))
        timing_cells = [
            f"{name}: {avg:.2f} s"
            for name, avg in sorted(cont_avgs.items(), key=lambda kv: kv[1])
        ]
        rows.append([pod_name] + padding + timing_cells)

    print(rstgen.table(header, rows), file=f)
def analyze_rst(*packages):
    """
    Return a reStructuredText table summarizing the line counts (code,
    doc, comment, total) of every code file in each of the given
    packages, plus a final "total" row.

    Example:

    >>> from lino.utils.code import analyze_rst
    >>> print(analyze_rst('lino'))
    """
    # One column per metric attribute collected by SourceFile.
    fields = 'count_code count_doc count_comment count_total'.split()
    headers = [
        "name", "code lines", "doc lines", "comment lines", "total lines"
    ]
    rows = []

    def fmt(n):
        # Render a line count in thousands, e.g. 1234 -> "1.2k".
        return "{}k".format(round(n / 1000.0, 1))

    total_sums = [0] * len(fields)
    for package in packages:
        sums = [0] * len(fields)
        # codefiles() yields (name, filename) pairs for the package glob.
        for name, filename in codefiles(package + '*'):
            sf = SourceFile(name, filename)
            for i, k in enumerate(fields):
                sums[i] += getattr(sf, k)
        rows.append([package] + [fmt(n) for n in sums])
        for i, k in enumerate(fields):
            total_sums[i] += sums[i]
    rows.append(['total'] + [fmt(n) for n in total_sums])
    return rstgen.table(headers, rows)
def make_matches_table(self, f):
    """
    Write a reST matrix of files vs. classifiers to *f*.

    One row per log file (sorted by name), one column per classifier in
    ``cfrs_sorted``; a cell shows "X" when the file has at least one
    match for that classifier.  A final "Sum:" row gives per-classifier
    totals.
    """
    header = ["name"] + [cfr.skb for cfr in cfrs_sorted]

    counts = {cfr.skb: 0 for cfr in classifiers.classifiers}
    rows = []
    for fname in sorted(self.matches_per_file):
        file_matches = self.matches_per_file[fname]
        row = [fname]
        for cfr in cfrs_sorted:
            hit = any(match["cfr"] is cfr for match in file_matches)
            if hit:
                counts[cfr.skb] += 1
            row.append("X" if hit else "")
        rows.append(row)

    # Totals row at the bottom.
    rows.append(["Sum:"] + [str(counts[cfr.skb]) for cfr in cfrs_sorted])
    print(rstgen.table(header, rows), file=f)
def get_rst(self):
    """
    Render a table of documents matching the directive content.

    Each content line is a docname pattern (relative to the current
    document); matching docs (excluding the current one) become entries.
    The ``filter`` option is evaluated as a Python expression per entry
    and the ``orderby`` option sorts by an entry attribute.
    """
    env = self.state.document.settings.env
    entries = []
    all_docnames = env.found_docs.copy()
    found = set([env.docname])  # don't include myself
    for entry in self.content:
        if not entry:
            continue
        patname = docname_join(env.docname, entry)
        docnames = sorted(patfilter(all_docnames, patname))
        for docname in docnames:
            if not docname in found:
                found.add(docname)
                entries.append(self.entry_class.create(env, docname))
    expr = self.options.get('filter')
    if expr:
        def func(e):
            # NOTE(review): eval() of directive-author-supplied code;
            # acceptable only because doc sources are trusted input.
            try:
                return eval(expr, dict(e=e))
            except Exception as exc:
                # Returning the error string is truthy, so entries whose
                # filter evaluation fails are deliberately kept visible.
                return "{} in {}".format(exc, expr)
        entries = list(filter(func, entries))
    orderby = self.options.get('orderby')
    if orderby:
        def func(a):
            # Missing attributes sort first via the '' default.
            return getattr(a, orderby, '')
        entries = sorted(entries, key=func)
    headers = self.get_headers()
    rows = []
    for e in entries:
        rows.append(self.format_entry(e))
    return rstgen.table(headers, rows)
def show_choicelists():
    """
    Show all the choicelists defined in this application.

    Prints one row per choicelist: its name, item count, preferred
    width, and its plural verbose name in every site language.
    """
    language_cols = [lng.name for lng in settings.SITE.languages]
    headers = ["name", "#items", "preferred_width"] + language_cols
    rows = [
        [str(cl), len(cl.choices), cl.preferred_width]
        + str2languages(cl.verbose_name_plural)
        for cl in sorted(kernel.CHOICELISTS.values(), key=str)
    ]
    print(table(headers, rows))
def show_choicelist(cls):
    """
    Similar to :func:`rt.show`, but the `text` is shown in all
    languages instead of just the current language.
    """
    headers = ["value", "name"] + [lng.name for lng in settings.SITE.languages]
    rows = [
        [item.value, item.name] + str2languages(item.text)
        for item in cls.get_list_items()
    ]
    print(table(headers, rows))
def show_fields(model, fieldnames=None, columns=False, all=None):
    """
    Print an overview description of the specified fields of the
    specified model.

    If model is an action or table, print the parameter fields of that
    action or table. If model is a table and you want the columns
    instead of the parameter fields, then specify `columns=True`.

    By default this shows only fields which have a help text. If you
    specify `all=True`, then also fields that have no help text will be
    shown.
    """
    cells = []
    cols = ["Internal name", "Verbose name", "Help text"]
    # NOTE: `all` shadows the builtin; kept for API compatibility.
    # When explicit field names were given, default to showing them all.
    if all is None:
        all = fieldnames is not None
    # Resolve the field lookup function and the default field list
    # depending on what kind of object `model` actually is.
    if isinstance(model, BoundAction):
        get_field = model.action.parameters.get
        if fieldnames is None:
            fieldnames = model.action.params_layout
    elif isinstance(model, Action):
        get_field = model.parameters.get
        if fieldnames is None:
            fieldnames = model.params_layout.main
    elif issubclass(model, Model):
        get_field = model._meta.get_field
        # get_field = model.get_data_elem
        if fieldnames is None:
            fieldnames = [f.name for f in model._meta.get_fields()]
    elif issubclass(model, AbstractTable):
        if columns:
            get_field = model.get_data_elem
            if fieldnames is None:
                fieldnames = model.column_names
                # get_handle().list_layout.main.columns
        else:
            get_field = model.parameters.get
            if fieldnames is None:
                fieldnames = model.params_layout.main
    # A space-separated string of names is accepted as well as a list.
    if isinstance(fieldnames, str):
        fieldnames = fieldnames.split()
    for n in fieldnames:
        fld = get_field(n)
        # Skip entries that resolve to None or lack a verbose_name
        # (e.g. layout panel names rather than real fields).
        if fld is not None and hasattr(fld, 'verbose_name'):
            ht = fld.help_text or ''
            if ht or all:
                cells.append([n, fld.verbose_name, unindent(ht)])
    print(table(cols, cells).strip())
def list(ctx, rst):
    """
    List the Lino applications known by getlino.

    With ``rst`` set, emit a reStructuredText page (intro, table of
    apps, then one templated section per app); otherwise print one
    plain "nickname : name : description" line per app.
    """
    if rst:
        click.echo(".. Generated by `getlino list --rst`")
        click.echo(".. _getlino.apps:\n")
        click.echo(rstgen.header(1, "List of the known Lino applications"))
        click.echo(
            "\nThe following applications are supported by :cmd:`getlino startsite`.\n"
        )
    rows = []
    headings = ["Name", "Short description", "Nickname"]
    for r in KNOWN_APPS:
        # r: nickname package_name git_repo settings_module front_end
        m = import_module(r.settings_module)
        s = m.Site
        # f-strings instead of `.format(**locals())`: the latter silently
        # couples the output to local variable names and scoops up every
        # local into the format call.
        if rst:
            rows.append([
                f":ref:`{s.verbose_name}<{r.nickname}>`",
                s.description or '',
                r.nickname,
            ])
        else:
            click.echo(f"{r.nickname} : {s.verbose_name} : {s.description}")
    if rst:
        click.echo(rstgen.table(headings, rows))
        tpl = JINJA_ENV.get_template("apps_section.rst")
        for r in KNOWN_APPS:
            m = import_module(r.settings_module)
            s = m.Site
            p = import_module(r.package_name.replace("-", "_"))
            # Public doc URL, when the package declares intersphinx_urls.
            public_url = None
            if hasattr(p, 'intersphinx_urls'):
                public_url = p.intersphinx_urls.get('docs', None)
            context = dict(repo=r, package=p, m=m, site=s, rstgen=rstgen,
                           public_url=public_url)
            click.echo(tpl.render(**context))
def get_rst(self):
    """
    Render a two-column, headerless table: the directive content on the
    left, and one ``.. figure::`` block per whitespace-separated
    argument (each carrying all directive options) on the right.
    """
    left = '\n'.join(self.content)
    # Build the right-hand column with join() instead of repeated
    # string +=, and f-strings instead of dated %-formatting.
    parts = []
    for arg in self.arguments[0].split():
        parts.append(f'.. figure:: {arg}\n')
        for name, value in self.options.items():
            parts.append(f" :{name}: {value}\n")
        parts.append(f"\n {arg}\n\n")
    right = ''.join(parts)
    return rstgen.table(["", ""], [[left, right]], show_headers=False)
def doit():
    # Print a table describing each action: name, label, help text,
    # target state and required states.
    # NOTE(review): `actions` and `all` are not defined in this function;
    # this looks like a nested helper relying on closure variables from an
    # enclosing scope (otherwise `all` would be the always-truthy builtin)
    # -- confirm against the enclosing definition.
    cells = []
    cols = ["Action name", "Verbose name", "Help text", "Target state",
            "Required states"]  # , "Required roles"]
    for a in actions:
        ht = a.help_text or ''
        # Only actions with a help text are shown unless `all` is truthy.
        if ht or all:
            # required_roles = ' '.join(
            #     [str(r) for r in a.required_roles])
            cells.append(
                [a.action_name,
                 a.label,
                 unindent(ht),
                 a.target_state,
                 a.required_states or '',
                 # required_roles
                 ])
    print(table(cols, cells).strip())
def show_db_overview(self):
    """Return a reStructuredText-formatted "database overview" report.

    The report lists the installed apps, then one table row per model
    with its name, default table, concrete-field count and row count.
    Used by test cases in tested documents.
    """
    models_list = sorted_models_list()
    apps = [p.app_label for p in settings.SITE.installed_plugins]
    s = "%d apps: %s." % (len(apps), ", ".join(apps))
    s += "\n%d models:\n" % len(models_list)
    headers = ["Name", "Default table", "#fields", "#rows"]
    rows = []
    # Intentionally includes unmanaged models (a former
    # `model._meta.managed` filter was disabled).
    for model in models_list:
        rows.append([
            fmn(model),
            model.get_default_table(),
            str(len(model._meta.concrete_fields)),
            str(model.objects.all().count()),
        ])
    s += rstgen.table(headers, rows)
    return s
def model_overview(model): headers = ["name", "type"] #~ formatters = [ #~ lambda f: f.name, #~ lambda f: f.__class__.__name__, #~ ] headers.append("verbose name") #~ for lng in babel.AVAILABLE_LANGUAGES: #~ headers.append("verbose name (" + lng + ")") #~ headers.append("help text") #~ formatters.append(lambda f: f.help_text) def verbose_name(f): settings.SITE.set_language(None) label_en = force_text(_(f.verbose_name)) babel_labels = [] for lng in settings.SITE.languages[1:]: dbutils.set_language(lng.django_code) label = force_text(_(f.verbose_name)) if label != label_en: babel_labels.append(label) if babel_labels: label_en += " (%s)" % ",".join(babel_labels) return label_en def rowfmt(f): cells = [f.name, fieldtype(f), verbose_name(f)] #~ for lng in babel.AVAILABLE_LANGUAGES: #~ babel.set_language(lng) #~ cells.append(force_text(_(f.verbose_name))) #~ cells.append(f.help_text) return cells rows = [rowfmt(f) for f in model._meta.fields] s = rstgen.table(headers, rows) model_reports = [r for r in kernel.master_tables if r.model is model] if model_reports: s += '\n\nMaster tables: %s\n\n' % rptlist(model_reports) if getattr(model, '_lino_slaves', None): s += '\n\nSlave tables: %s\n\n' % rptlist( list(model._lino_slaves.values())) #~ s += '\n\nSlave reports: ' #~ s += ', '.join([name for name,rpt in model._lino_slaves.items()]) #~ s += '\n\n' return s
def fields_table(fields):
    """
    Return a reST table describing the given fields (name, type,
    verbose name and help text), skipping Babel shadow fields.
    """
    headers = ["name", "type", "verbose name", "help text"]
    rows = [
        [f.name, fieldtype(f), f.verbose_name, f.help_text]
        for f in fields
        if not hasattr(f, '_lino_babel_field')
    ]
    return rstgen.table(headers, rows)
def sql_summary(lines, show_times=False, show_details=False, **options):
    """
    Parse the SQL queries from `lines` and print a summary.

    `lines` is an iterable of text lines from a logfile or from
    :func:`lino.api.doctest.show_sql_summary`.

    Any backticks and double quotes are removed for readability. MySQL
    uses backticks where SQLite uses double quotes around table and
    field names in the SQL syntax. `Here
    <https://stackoverflow.com/questions/11321491/when-to-use-single-quotes-double-quotes-and-backticks-in-mysql>`__
    is an interesting discussion with examples.
    """
    if sqlparse is None:
        raise Exception("sql_summary() requires the sqlparse package")
    d = {}
    for l in lines:
        # Normalize quoting so MySQL and SQLite logs group identically.
        l = l.replace('"', '')
        l = l.replace('`', '')
        m = re.match(regex, l)
        if m is None:
            raise Exception("Invalid line {!r}".format(l))
        g = m.groupdict()
        entry = Entry(g['sql'], g['time'])
        k = entry.group_key()
        # Group identical statements together, accumulating timings.
        if k in d:
            d[k].collect(entry)
        else:
            d[k] = entry
    if not d:
        print("No sql queries found")
        return
    if show_details:
        for e in sorted(d.values(), key=lambda x: x.total_time):
            p(e, **options)
        print("-------------------")
        print("The slowest SQL call was:")
        # BUG FIX: the original did `d[max(d, key=lambda x: x.time)]`,
        # which iterates the dict *keys* (strings) and accesses `.time`
        # on them -> AttributeError.  Compare the Entry values instead.
        e = max(d.values(), key=lambda x: x.time)
        p(e, **options)
        print("-------------------")
    else:
        if show_times:
            headers = 'total_time count table stmt_type time'.split()
            values = sorted(d.values(), key=lambda x: -x.total_time)
        else:
            headers = 'table stmt_type count'.split()
            values = sorted(d.values(), key=lambda x: x.table)
        rows = [[getattr(e, h) for h in headers] for e in values]
        rows.sort()
        print(rstgen.table(headers, rows))
def make_overview_table(self, f, revision=None):
    # Write a reST overview table of all classifiers to *f*: per SKB the
    # message, how many runs it affected, a link to the latest match and
    # the classifiers that co-occur more often than their baseline.
    # When `revision` is given, restrict matches/files to that revision.
    # Print a header
    if revision is None:
        files = self.matches_per_file.keys()
    else:
        files = self.revision_files[revision]
    file_count = len(files)
    print(f"Covering {file_count} logs.", file=f)
    if file_count > 0:
        files_sorted = sorted(files, key=lambda fname: self.file_date.get(fname))
        print(
            f"Date range: {self.file_date[files_sorted[0]]} to {self.file_date[files_sorted[-1]]}\n",
            file=f,
        )
    # Start composing table
    header = [
        "**SKB**",
        "**Message**",
        "**Affected Runs**",
        "**Last**",
        "**Cross-Check**",
    ]
    rows = []
    # Classifiers ordered by how many files they affected, most first.
    for cfr in sorted(
        cfrs_sorted, key=lambda cfr: len(self.files_per_cfr[cfr.skb]), reverse=True
    ):
        matches = self.all_matches_per_clfr[cfr.skb]
        files_with_matches = self.files_per_cfr[cfr.skb]
        # Specialise to revision, if requested.
        if revision is not None:
            # Skip rows that have no matches globally (so we don't
            # repeat them for every revision).
            if not matches:
                continue
            matches = [
                cfr_match
                for cfr_match in matches
                if cfr_match["revision"] == revision
            ]
            files_with_matches = files_with_matches & self.revision_files[revision]
        # max(1, ...) guards against division by zero on empty file sets.
        files_matched_rel = len(files_with_matches) / max(1, file_count)
        # Include date + link to latest match
        if matches:
            last_match = sorted(matches, key=lambda match: match["date"])[-1]
            # Generate link - if it is one of the matches we are keeping
            if any(
                cfr_match["log_id"] == last_match["log_id"]
                for cfr_match in self.matches_per_clfr[cfr.skb]
            ):
                last_ref = f":ref:`{last_match['date']} <{cfr.skb}-{last_match['log_id']}>`"
            else:
                last_ref = last_match["date"]
        else:
            last_ref = ""
        # Determine whether another kind of error appears to happen more often
        other_skb_changes = ""
        if files_with_matches:
            # Collect number of runs that fail for other classifiers as well
            cfr_intersection = {
                cfr2.skb: len(self.files_per_cfr[cfr2.skb] & files_with_matches)
                for cfr2 in classifiers.classifiers
            }
            # Determine the ratio of those other failures among our
            # failures, minus their ratio among all runs.  Filter out
            # classifiers that did not happen for this SKB, as well as
            # taints if this classifier likely caused the taint.
            cfr_intersection_changes = sorted(
                [
                    (
                        cfr2,
                        cfr_intersection[cfr2.skb] / len(files_with_matches)
                        - len(self.files_per_cfr[cfr2.skb]) / file_count,
                    )
                    for cfr2 in classifiers.classifiers
                    if len(self.files_per_cfr[cfr2.skb]) > 0
                    and cfr2.skb != cfr.skb
                    and (not cfr.taints or not cfr2.skb.startswith("TAINT"))
                ],
                key=lambda skb_diff: skb_diff[1],
                reverse=True,
            )
            # Show the top three positively-correlated classifiers.
            other_skb_changes = ", ".join(
                f":ref:`{cfr2.skb} <{cfr2.skb}>`: {change*100:+.1f}%"
                for cfr2, change in cfr_intersection_changes[:3]
                if change > 0
            )
        rows.append(
            [
                f":ref:`{cfr.skb} <{cfr.skb}>`",
                cfr.message,
                f"{len(files_with_matches)} ({files_matched_rel*100:.1f}%)",
                last_ref,
                other_skb_changes,
            ]
        )
    print(rstgen.table(header, rows), file=f)