def cli2rst(cliFile, rstFile):
    cli = ctk_cli.CLIModule(cliFile)

    rst = rstcloth.RstCloth()
    rst.title(cli.title)
    rst.newline()
    rst.content(cli.description)
    rst.newline()
    rst.field('Authors', cli.contributor)
    rst.field('Version', cli.version)
    rst.field('License', cli.license)
    rst.newline()
    rst.newline()

    for parameterGroup in cli:
        rst.h2(parameterGroup.label + ' Parameters')
        rst.content(parameterGroup.description)
        rst.newline()
        for parameter in parameterGroup:
            rst.definition(parameter.label, parameter.description, bold=True)
            rst.newline()

    rst.write(rstFile)
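# A minimal usage sketch, not taken from the original project: the module-level
# imports are assumed, and the CLI XML/RST file names are hypothetical placeholders.
import ctk_cli
import rstcloth

cli2rst('GaussianBlur.xml', 'GaussianBlur.rst')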
def __str__(self):
    d = rstcloth.RstCloth()
    d.h3(self.name)
    d.newline()
    if self.description:
        d.content(self.description)
        d.newline()
    comments = list(self.comments)
    if comments:
        for comment in comments:
            d.li([comment])  # work around rstcloth bug
        d.newline()
    for checklist in self.checklists:
        d.h4(checklist['name'] or 'Checklist')
        for element in checklist['items']:
            d.newline()
            d.li([element])  # work around rstcloth bug
        d.newline()
    for attachment in self.attachments:
        d.footnote('', attachment)
    return "\n".join(d.data)
def main(args):
    for arg in args:
        data = yaml.safe_load(open(arg).read())

        doc = rstcloth.RstCloth()
        doc.title("{id}".format(**data))
        doc.newline()
        doc.h2(data['title'])
        doc.field('Date', "{:%B %d, %Y}".format(data['date']))
        doc.newline()
        doc.field('Description', data['description'])
        doc.newline()
        doc.field('Announcement',
                  doc.inline_link(data['reference'], data['reference']),
                  wrap=False)
        doc.newline()
        doc.field("Products affected",
                  format_affected(data['affected-products']), wrap=False)
        doc.newline()
        doc.field("Credits", format_reporters(data['reporters']), wrap=False)
        doc.newline()
        doc.field("Bug reports", format_urls(data['issues']['links']),
                  wrap=False)
        doc.newline()
        doc.field("Reviews", format_urls(data['reviews']['links']), wrap=False)
        doc.newline()
        # doc.directive('download', '`<{id}.yaml>`'.format(**data))
        doc.print_content()
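# Hedged illustration of the advisory data main() expects, derived only from the
# keys the function reads; every value below is a made-up placeholder, and the
# list/link structures assume whatever format_affected, format_reporters and
# format_urls accept in the original project.
import datetime

example_advisory = {
    'id': 'EXAMPLE-2024-0001',
    'title': 'Example advisory title',
    'date': datetime.date(2024, 1, 1),
    'description': 'Placeholder description.',
    'reference': 'https://example.com/announcement',
    'affected-products': [],
    'reporters': [],
    'issues': {'links': []},
    'reviews': {'links': []},
}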
def make_restructured_text(spec_name, base_file, data_dict_xml_path,
                           output_path, msgs, comps, fields):
    """Make restructuredtext documents for Sphinx consumption"""
    data_per_file = dict()

    # Construct base document header
    d = rstcloth.RstCloth()
    d.title(spec_name + " | " + base_file)
    d.newline()

    # Copy data dictionary as-is into the output path
    with open(data_dict_xml_path, mode='r') as data_dict_file:
        data_per_file[os.path.join(output_path, base_file)] = data_dict_file.read()
    d.h2("Data Dictionary Source")
    d.content(rstcloth.RstCloth.role("download", base_file))
    d.newline()

    # Categorize all the messages
    msg_with_categories = dict()
    sorted_msg_names = sorted([msg for msg in msgs])
    for msg_name in sorted_msg_names:
        # Categorize the message
        msgtype = msgs[msg_name]['category']
        if msgtype not in msg_with_categories:
            msg_with_categories[msgtype] = list()
        msg_with_categories[msgtype].append(msg_name)

        # Generate the data
        msg_file_data = produce_pages.produce_message_page(
            msg_name, msgs[msg_name], fields, comps)
        data_per_file[os.path.join(output_path, "Messages",
                                   msg_name + ".rst")] = msg_file_data

    # Generate table of contents
    sorted_categories = sorted([cat for cat in msg_with_categories])
    for msg_category in sorted_categories:
        d.h2("Messages - " + msg_category.upper())
        msgs_content = [
            "Messages/" + str(key)
            for key in msg_with_categories[msg_category]
        ]
        d.directive(name="toctree", content=msgs_content)
        d.newline()
    data_per_file[os.path.join(output_path, "index.rst")] = '\n'.join(d.data)

    # Generate conf.py for sphinx
    conf_py = list()
    conf_py.append("project = '" + base_file + "'")
    conf_py.append("author = '" + spec_name + "'")
    conf_py.append("source_suffix = '.rst'")
    conf_py.append("master_doc = 'index'")
    conf_py.append("language = None")
    conf_py.append("html_theme = 'bizstyle'")
    conf_py.append("exclude_patterns = ['Thumbs.db', '.DS_Store']")
    conf_py.append("pygments_style = None")
    data_per_file[os.path.join(output_path, "conf.py")] = '\n'.join(conf_py)

    return data_per_file
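# Hedged sketch of how a caller might persist the mapping returned above;
# make_restructured_text itself does no file writing. The helper name is
# hypothetical.
import os

def write_output_files(data_per_file):
    # data_per_file maps output file paths to their rendered contents
    for path, content in data_per_file.items():
        os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
        with open(path, 'w') as out:
            out.write(content)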
def main(days, skip_not_updated):
    config_info = get_config_info()
    api = trelloclient.TrelloClient(api_key=config_info['api_key'],
                                    token=config_info['access_token'])

    b = get_board(api, config_info['board'])
    assert b is not None

    in_progress_list = get_list(b, config_info['in_progress_list'])
    assert in_progress_list is not None

    done_list = get_list(b, config_info['done_list'])
    assert done_list is not None

    top_labels = [l.strip() for l in config_info['top_labels'].split(',')]
    end_labels = [l.strip() for l in config_info['bottom_labels'].split(',')]

    print('Report generated on: %s.' % datetime.date.today().isoformat())
    print('Generated by: https://github.com/booxter/trello-report\n')

    labels = get_board_labels(b, top_labels, end_labels)

    since = (datetime.datetime.now(datetime.timezone.utc) -
             datetime.timedelta(days=days)) if days else None

    d = rstcloth.RstCloth()
    for l in (in_progress_list, done_list):
        d.h1(l.name)
        d.newline()

        cards, not_updated = get_cards(l, updated_since=since)
        for label in labels:
            labeled_cards = get_cards_by_label(cards, label)
            if labeled_cards:
                d.h2(label)
                for card in labeled_cards:
                    d.newline()
                    d._add(str(card))
                    cards.remove(card)
                d.newline()

        # handle remaining, unlabeled cards
        if cards:
            d.h2('Other')
            for card in cards:
                d.newline()
                d._add(str(card))
            d.newline()

        # special section for cards that haven't had updates this week
        if not_updated and not skip_not_updated:
            d.h2("Not Updated")
            for card in not_updated:
                d.newline()
                d._add(str(card))
            d.newline()

    d.print_content()
def render_intermediate_files(fn, version, releases, conf):
    """
    Each major release series has an "intermediate" file that includes the
    changelog file for each release. This way, there's no requirement to
    change the changelog file every release.
    """
    r = rstcloth.RstCloth()

    for rel in releases:
        r.directive(
            "include",
            "/includes/changelogs/releases/{0}.rst".format('.'.join(
                [str(s) for s in rel])))
        r.newline()

    r.write(fn)
    migrate_changelog(fn, conf)
    logger.info("wrote intermediate versions file")
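# Hedged illustration only: for a hypothetical release list, the include
# directives emitted above would look approximately like the lines printed here
# (the exact rendering depends on the rstcloth version in use).
releases = [(3, 0, 1), (3, 0, 2)]
for rel in releases:
    print(".. include:: /includes/changelogs/releases/{0}.rst".format(
        '.'.join(str(s) for s in rel)))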
def get_changelog_content(fn, version, conf):
    """
    Builds and writes the heading based on the ordered mapping of heading
    groups to RST files in the source directory.
    """
    # this queries jira and builds a map[OrderedDict]list<issue_pairs>
    # structure that holds the data and groupings
    headings = get_issue_structure(version, conf)

    # invert the mapping of nested, so we can properly handle subheadings.
    nested = dict()
    for enclosing_level, sub_headings in conf.system.files.data.jira.changelog.nesting.items():
        for component in sub_headings:
            nested[component] = enclosing_level

    # build the changelog content itself.
    r = rstcloth.RstCloth()
    level = 3

    # headings and links
    r.ref_target("{0}-changelog".format(version))
    r.newline()

    r.heading(text="{0} Changelog".format(version),
              char=giza.content.helper.character_levels[level - 1])
    r.newline()

    # process all of the issues by group.
    for heading, issues in headings.items():
        if heading in nested:
            # we deal with nested headings when we do their parent. skip here.
            continue
        else:
            if heading in conf.system.files.data.jira.changelog.nesting and len(issues) == 0:
                # if a heading has subheadings, and all are empty, then we
                # should skip it entirely.
                empty_sub_headings = 0
                for sub in conf.system.files.data.jira.changelog.nesting[heading]:
                    if len(headings[sub]) == 0:
                        empty_sub_headings += 1
                if empty_sub_headings == len(conf.system.files.data.jira.changelog.nesting[heading]):
                    continue
            elif len(issues) == 0:
                # skip empty headings.
                continue

            # format the heading.
            r.heading(text=heading, indent=0,
                      char=giza.content.helper.character_levels[level])
            r.newline()

            # a single issue renders as plain content; multiple issues as a list.
            if len(issues) == 1:
                r.content("{1} {0}".format(issues[0][1],
                                           r.role("issue", issues[0][0])),
                          wrap=False)
            else:
                for issue in issues:
                    r.li("{1} {0}".format(issue[1], r.role("issue", issue[0])),
                         wrap=False)
            r.newline()

            # repeat the above formatting with minor variations to do the nesting.
            if heading in conf.system.files.data.jira.changelog.nesting:
                for sub in conf.system.files.data.jira.changelog.nesting[heading]:
                    if len(headings[sub]) == 0:
                        continue

                    r.heading(text=sub, indent=0,
                              char=giza.content.helper.character_levels[level + 1])
                    r.newline()

                    sub_issues = headings[sub]
                    if len(sub_issues) == 1:
                        r.content("{1} {0}".format(
                            sub_issues[0][1].strip(),
                            r.role("issue", sub_issues[0][0])),
                                  wrap=False)
                    else:
                        for issue in sub_issues:
                            r.li("{1} {0}".format(issue[1].strip(),
                                                  r.role("issue", issue[0])),
                                 wrap=False)
                    r.newline()

    r.write(fn)
    logger.info(
        "wrote changelog '{0}'. Commit this file independently.".format(fn))
    migrate_changelog(fn, conf)
def produce_message_page(message_name, message_content, fields, components):
    """Produce an individual restructuredtext document for an individual
    message page"""
    d = rstcloth.RstCloth()
    d.title(message_name + " (" + str(message_content['msgtype']) + ")")

    # Message Summary
    d.newline()
    t = table.TableData(num_columns=2)
    t.add_header(["MsgType", "Category"])
    t.add_row(
        [message_content['msgtype'], message_content['category'].upper()])
    for table_line in table.RstTable(t).render_table():
        d.content(table_line, wrap=False)
    d.newline()

    # Message Fields
    standard_header = [
        "Tag", "Field Name", "Req'd", "Data Type", "Acceptable Enums"
    ]
    components_to_add = list()
    d.h2("Fields")
    d.newline()
    t = table.TableData(num_columns=5)
    t.add_header(standard_header)
    table_empty = True
    sorted_element_names = sorted(
        [elem_name for elem_name in message_content['elements']])
    for element_name in sorted_element_names:
        element_rows, new_comps_to_add = _produce_element_rows(
            element_name, message_content['elements'][element_name], fields,
            components_to_add)
        for new_comp in new_comps_to_add:
            if new_comp not in components_to_add:
                components_to_add.append(new_comp)
        for element_row in element_rows:
            table_empty = False
            t.add_row(element_row)
    if table_empty:
        t.add_row(["*empty*" for _ in standard_header])
    for table_line in table.ListTable(t).output:
        d.content(table_line, wrap=False)
    d.newline()

    # If there were any components, add those now
    if len(components_to_add) > 0:
        d.h2("Components")
        d.newline()
        processing_components = [comp for comp in components_to_add]
        processed_component_rows = dict()
        while len(processing_components) > 0:
            component = processing_components.pop()
            if component not in processed_component_rows:
                processed_component_rows[component] = dict()
                for comp_elem in components[component]:
                    component_data = components[component][comp_elem]
                    element_rows, new_comps_to_add = _produce_element_rows(
                        comp_elem, component_data, fields, components_to_add)
                    processed_component_rows[component][comp_elem] = element_rows
                    for new_comp in new_comps_to_add:
                        if new_comp not in components_to_add:
                            components_to_add.append(new_comp)
                            processing_components.append(new_comp)

        sorted_component_names = sorted(
            [name for name in processed_component_rows])
        for component in sorted_component_names:
            d.h4(component)
            d.newline()
            t2 = table.TableData(num_columns=5)
            t2.add_header(standard_header)
            sorted_component_elems = sorted(
                [elem for elem in processed_component_rows[component]])
            for comp_elem in sorted_component_elems:
                element_rows = processed_component_rows[component][comp_elem]
                for element_row in element_rows:
                    t2.add_row(element_row)
            for table_line in table.ListTable(t2).output:
                d.content(table_line, wrap=False)
            d.newline()

    return '\n'.join(d.data)
def generate_changelog_rst(config, headings, fixVersion):
    """
    Generate the changelog rst from the groupings established in
    get_issue_structure()
    """
    # invert the mapping of nested, so we can properly handle subheadings.
    nested = dict()
    for enclosing_level, sub_headings in config.get('nesting').items():
        for component in sub_headings:
            nested[component] = enclosing_level

    # build the changelog content itself.
    r = rstcloth.RstCloth()
    level = 3

    r.ref_target("{0}-changelog".format(fixVersion))
    r.newline()

    r.heading(text="{0} Changelog".format(fixVersion), char='-')
    r.newline()

    # process all of the issues by group.
    for heading, issues in headings.items():
        if heading in nested:
            # we deal with nested headings when we do their parent. skip here.
            continue
        else:
            if heading in config.get('nesting') and len(issues) == 0:
                # if a heading has subheadings, and all are empty, then we
                # should skip it entirely.
                empty_sub_headings = 0
                for sub in config.get('nesting').get(heading):
                    if len(headings[sub]) == 0:
                        empty_sub_headings += 1
                if empty_sub_headings == len(config.get('nesting').get(heading)):
                    continue
            elif len(issues) == 0:
                # skip empty headings.
                continue

            # format the heading.
            r.heading(text=heading, indent=0, char='~')
            r.newline()

            # a single issue renders as plain content; multiple issues as a list.
            if len(issues) == 1:
                r.content("{1} {0}".format(issues[0][1],
                                           r.role("issue", issues[0][0])),
                          wrap=False)
            else:
                for issue in issues:
                    r.li("{1} {0}".format(issue[1], r.role("issue", issue[0])),
                         wrap=False)
            r.newline()

            # repeat the above formatting with minor variations to do the nesting.
            if heading in config.get('nesting'):
                for sub in config.get('nesting').get(heading):
                    if len(headings[sub]) == 0:
                        continue

                    r.heading(
                        text=sub,
                        indent=0,
                        # char=giza.content.helper.character_levels[level+1])
                        char='`')
                    r.newline()

                    sub_issues = headings[sub]
                    if len(sub_issues) == 1:
                        r.content("{1} {0}".format(
                            sub_issues[0][1].strip(),
                            r.role("issue", sub_issues[0][0])),
                                  wrap=False)
                    else:
                        for issue in sub_issues:
                            r.li("{1} {0}".format(issue[1].strip(),
                                                  r.role("issue", issue[0])),
                                 wrap=False)
                    r.newline()

    return r