def write_table_to_md_file(md_file: MdUtils, table_rows_: List[List[str]]):
    """Render a 2-D list of cell strings as one centered table in *md_file*.

    Args:
        md_file: target MdUtils document.
        table_rows_: rows of equal length; row 0 is treated as the header.
    """
    if not table_rows_:
        # Nothing to render; previously an empty input raised IndexError
        # on table_rows_[0].
        return
    # mdutils expects all cells as a single flat list, row-major.
    flat_rows = list(chain.from_iterable(table_rows_))
    md_file.new_table(columns=len(table_rows_[0]),
                      rows=len(table_rows_),
                      text=flat_rows,
                      text_align='center')
def format_entry(md_file: MdUtils, entry: Dict):
    """Append one bibliography entry to *md_file*, rendered via FORMAT.

    The entry must have a 'title'; author/year/journal default to ''.
    """
    title = entry.get('title')
    assert title
    # BibTeX joins authors with ' and '; present them comma-separated.
    authors = entry.get('author', '').replace(' and', ',')
    rendered = FORMAT.format(TITLE=title,
                             AUTHOR=authors,
                             YEAR=entry.get('year', ''),
                             JOURNAL=entry.get('journal', ''))
    md_file.new_line(rendered)
def md_table_writer(name, rows):
    """Write the flat cell list *rows* to results_<name>.md.

    The 'common' table has 5 columns, all others 6; row count is derived
    from the flat list length.
    """
    if name == 'common':
        mdfile = MdUtils(file_name='results_common')
        column_count = 5
    else:
        mdfile = MdUtils(file_name='results_{}'.format(name))
        column_count = 6
    mdfile.new_table(columns=column_count,
                     rows=int(len(rows) / column_count),
                     text=rows,
                     text_align='center')
    mdfile.create_md_file()
    # Post-process the generated file (helper defined elsewhere).
    lines_delete('results_{}.md'.format(name))
def create_md_file(resume):
    """Assemble index.md from *resume*: about-me, studies, career, projects."""
    md_file = MdUtils(file_name='index.md', title=resume['about_me']['name'])
    # Each section writer appends its part of the resume in order.
    for write_section in (_about_me, _studies, _career, _projects):
        write_section(md_file, resume)
    md_file.create_md_file()
def write_table(parts, keys, md: mdutils.MdUtils, sort_by='Comment'):
    """Emit *parts* as a table (header row = *keys*), sorted by *sort_by*."""
    ordered = sorted(parts, key=lambda part: part[sort_by])
    # Header row first, then one row per part, flattened row-major as
    # mdutils expects.
    cells = list(keys)
    cells.extend(part.get(key) for part in ordered for key in keys)
    md.new_line()
    md.new_table(columns=len(keys), rows=len(ordered) + 1, text=cells)
def __init__(self, repo):
    """Build CHANGELOG.md covering the commit range repo.start..repo.end."""
    self.mdFile = MdUtils(file_name='CHANGELOG', title='CHANGELOG')
    # Release heading plus a full-changelog compare link.
    self.mdFile.new_line(
        f"## **Release** "
        f"[{repo.end}](https://github.com/nanocurrency/nano-node/tree/{repo.end})")
    self.mdFile.new_line(
        f"[Full Changelog](https://github.com/nanocurrency"
        f"/nano-node/compare/{repo.start}...{repo.end})")
    # Group PRs into changelog sections, then render each section.
    grouped = self.pull_to_section(repo.commits)
    for section, pull_requests in grouped.items():
        self.write_header(section)
        for pr in pull_requests:
            # pr[0] is the PR number used to look up full commit info.
            self.write_PR(pr, repo.commits[pr[0]])
    self.mdFile.create_md_file()
def add_table(self, md_file: MdUtils, translations):
    """Append a 3-column table (Intention | Command | Msg Id) to *md_file*.

    Each translation's first occurrence (file, line) forms the intention cell.
    """
    cells = ["Intention", "Command", "Msg Id"]
    for entry in translations:
        first_occurrence = entry.occurrences[0]
        intent = first_occurrence[0] + ":" + first_occurrence[1]
        cells.extend([intent, entry.msgstr, entry.msgid])
    md_file.new_table(columns=3,
                      rows=len(translations) + 1,
                      text=cells,
                      text_align='left')
def write_to_markdown(self, filename: str):
    """Write one header + translation table per plugin to OUTPUT_DIR/<filename>."""
    md_file = MdUtils(
        file_name=self.OUTPUT_DIR + filename,
        title='Mycroft.ai commands (autogenerated from po files)')
    for plugin_name, plugin_translations in self.translations.items():
        md_file.new_header(level=1, title=plugin_name)
        md_file.new_line()
        self.add_table(md_file, plugin_translations)
    md_file.create_md_file()
def to_md(self, mdFile: MdUtils) -> MdUtils:
    """Append this run's OCR mismatch summary and per-word table to *mdFile*.

    Returns the same MdUtils instance for chaining.
    """
    mdFile.new_line(f'{self.font_name} - {self.font_size}:',
                    bold_italics_code='b')
    mdFile.new_line(f'There were in total {len(self.error_words)} mismatches.')
    mdFile.new_line(
        f'The levenshtein distance of the complete text is {self.total_levenshtein}'
    )
    mdFile.new_line()
    # Header row followed by one row per mismatched word.
    cells: List[str] = ['Word in PDF', 'Recognized Word', 'Levenshtein Distance']
    for mismatch in self.error_words:
        cells += [mismatch.pdf_word, mismatch.ocr_word, str(mismatch.error_value)]
    mdFile.new_table(columns=3,
                     rows=len(self.error_words) + 1,
                     text=cells,
                     text_align='center')
    return mdFile
def __init__(self, repo):
    # Build CHANGELOG.md covering the commit range repo.start..repo.end.
    self.mdFile = MdUtils(file_name='CHANGELOG', title='CHANGELOG')
    self.mdFile.new_line(
        "## **Release** " +
        "[{0}](https://github.com/BananoCoin/banano/tree/{0})".format(
            repo.end))
    # NOTE(review): the release link targets BananoCoin/banano but this
    # compare link targets bananocoin/nano-node -- looks inconsistent;
    # confirm which repository the changelog is actually for.
    self.mdFile.new_line("[Full Changelog](https://github.com/bananocoin"
                         "/nano-node/compare/{0}...{1})".format(
                             repo.start, repo.end))
    # PRs grouped into changelog sections by label.
    sort = self.pull_to_section(repo.commits)
    for section, prs in sort.items():
        self.write_header_PR(section)
        for pr in prs:
            # pr[0] is the PR number used to look up full commit info.
            self.write_PR(pr, repo.commits[pr[0]])
    if repo.other_commits:
        # Commits that are not associated with any pull request.
        self.write_header_no_PR()
        for sha, message in repo.other_commits:
            self.write_no_PR(repo, sha, message)
    self.mdFile.create_md_file()
def write_designators(parts, keys, md: mdutils.MdUtils, sort_by='designator'):
    """Write one level-2 header plus a table for each designator group.

    Parts are grouped by *sort_by*; within a group, rows are ordered by 'id'.
    """
    # groupby only merges adjacent equal keys, so sort by the group key first.
    ordered = sorted(parts, key=lambda part: part[sort_by])
    for designator, group in groupby(ordered, lambda part: part[sort_by]):
        md.new_line()
        md.new_header(level=2, title=designator)
        members = sorted(group, key=lambda part: part['id'])
        cells = list(keys)
        for member in members:
            cells.extend(member.get(key) for key in keys)
        md.new_line()
        md.new_table(columns=len(keys), rows=len(members) + 1, text=cells)
def md_file_create(self, research_topic, filename, ordered_by='time'):
    '''
    Creates a markdown file of data in the research database given
    a research topic.

    Returns the file name.
    '''
    # NOTE(review): ordered_by is currently unused -- records are written
    # in database order; confirm whether time-ordering was intended.
    Record = Query()
    records = self.research_db.search(
        Record.research_topic == research_topic)
    mdfile = MdUtils(file_name=filename, title=research_topic)
    for record in records:
        # One bullet per saved page: "- [page title](url)".
        mdfile.new_line('- ' + mdfile.new_inline_link(
            link=record["url"], text=record["page_title"]))
    mdfile.create_md_file()
    # Bug fix: the docstring promises the file name, but a bare `return`
    # previously returned None.
    return filename
def write_md_file(herd, output_file_name, version):
    """Dump the herd's software summary as a centered markdown table."""
    from mdutils import MdUtils
    summary = MdUtils(file_name=output_file_name,
                      title=f'Software Summary (build {version})')
    cells = get_table_columns_md()  # header row; its length fixes the width
    column_count = len(cells)
    for _, cattle_head in herd.get_cattle_heads().items():
        cells.extend(cattle_head.get_table_row(format='md'))
    summary.new_table(columns=column_count,
                      rows=herd.number_of_heads() + 1,
                      text=cells,
                      text_align='center')
    logger.info(f'Create file {output_file_name}.md')
    summary.create_md_file()
def _radar_table_cells(data, status_keys, linkify):
    """Build the flat cell list (header + one row per resource) for one ring."""
    cells = [
        "<sub>Resource</sub>", "<sub>Description</sub>", "<sub>Type</sub>",
        "<sub>Status</sub>"
    ]
    for key in status_keys:
        resource_name = key
        resource_desc = data[key].get("description", "")
        # Bug fix: the default for "architecture review" was "" which has no
        # .get(), so a resource without that key raised AttributeError.
        resource_type = data[key].get("architecture review", {}).get("type", "")
        resource_url = data[key].get("url", "")
        resource_status = data[key].get("status", "")
        if linkify:
            # base_url is a module-level global configured elsewhere.
            resource_name = "[" + resource_name + "](" + base_url + '/' + \
                resource_url + ")"
        cells.extend([
            '<sub>' + resource_name + '</sub>',
            '<sub>' + resource_desc + '</sub>',
            '<sub>' + resource_type + '</sub>',
            '<sub>' + resource_status + '</sub>'
        ])
    return cells


def _write_radar_section(mdf, data, title, blurb, status_keys, linkify):
    """Emit one ring section: header, blurb paragraph, then either the table
    or a red "no resources" note when the ring is empty."""
    mdf.new_header(level=3, title=title)
    mdf.new_paragraph(blurb)
    row_count = len(status_keys) + 1
    cells = _radar_table_cells(data, status_keys, linkify)
    mdf.new_line("")
    if row_count == 1:
        mdf.new_line("There are currently no resources at this ring level.",
                     bold_italics_code='bi', color='red')
    else:
        mdf.new_table(columns=4, rows=row_count, text=cells)


def makeMarkdown(data, path):
    # Creates the README file describing the tech radar and its five rings.
    global base_url
    mdf = MdUtils(file_name=path + 'README', title='RBA TechRadar for Azure')
    # Bucket resource names by their ring status.
    rings = {"ADOPT": [], "TRIAL": [], "ASSESS": [], "HOLD": [], "REJECT": []}
    for key in data:
        status = data[key].get("status")
        if status in rings:
            rings[status].append(key)
    mdf.new_header(level=1, title='Overview')
    mdf.new_header(level=2, title='What is the purpose?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is a tool to inspire and "
        "support engineering teams at Risk & Business Analytics to pick the best "
        "technologies for new projects; it provides a platform to share knowledge "
        "and experience in technologies, to reflect on technology decisions and "
        "continuously evolve our technology landscape. Based on the pioneering "
        "work at Thought Works, our radar sets out the changes in technologies "
        "that are interesting in cloud development - changes that we think our "
        "engineering teams should pay attention to and consider using in their "
        "projects.")
    mdf.new_header(level=2, title='How do we maintain it?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is maintained by the Cloud "
        "Center of Excellence - an open group of senior RBA technologists committed "
        "to devote time to this purpose. The CCoE self organizes to maintain these "
        "documents, including this version. Assignment of technologies to rings is "
        "the outcome of status change proposals, which are discussed and voted on "
        "in CCoE meetings. The radar depends on active participation and input from "
        "all engineering teams at RBA.")
    mdf.new_header(level=2, title='What are the current ring assignments?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is a list of technologies, "
        "complemented by an assesment result, called ring assignment. We use five "
        "rings with the following semantics:")
    # Adopt / Trial / Assess link each resource name to its docs page;
    # Hold / Reject intentionally do not (matching the original behavior).
    _write_radar_section(
        mdf, data, 'Adopt',
        "Technologies we have high confidence in to serve our "
        "purpose, also at large scale. Technologies with a usage culture in the "
        "RBA production environment, low risk, automated policy enforcement and "
        "are recommended to be widely used.",
        rings["ADOPT"], linkify=True)
    _write_radar_section(
        mdf, data, 'Trial',
        "Technologies that we have seen work with success in projects "
        "to solve real problems; first serious usage experience that confirm benefits "
        "and uncover limitations. TRIAL technologies are slightly more risky; some "
        "engineers in our organization walked this path and will share knowledge and "
        "experiences. This area can contain services that have been architecture and "
        "security reviewed but do not contain automated policy managmeent.",
        rings["TRIAL"], linkify=True)
    _write_radar_section(
        mdf, data, 'Assess',
        "Technologies that are promising and have clear potential "
        "value-add for us; technologies worth investing some research and "
        "prototyping efforts to see if it has impact. ASSESS technologies have "
        "higher risks; they are often new to our organization and highly unproven "
        "within RBA. You will find some engineers that have knowledge in the "
        "technology and promote it, you may even find teams that have started "
        "a prototyping effort. These technologies can also include services that "
        "are currently in architecture or security review.",
        rings["ASSESS"], linkify=True)
    _write_radar_section(
        mdf, data, 'Hold',
        "Technologies not recommended to be used for new projects. "
        "Technologies that we think are not (yet) worth to (further) invest in. "
        "HOLD technologies should not be used for new projects, but usually can be "
        "continued for existing projects. These technologies may include services "
        "that have yet to be evaluated by architecture and security due to a lack "
        "of interest, time, or need.",
        rings["HOLD"], linkify=False)
    _write_radar_section(
        mdf, data, 'Reject',
        "Technologies not recommended to be used for any projects. "
        "Technologies that have undergone architecture and security review but do "
        "not meet company standards for use. REJECT technologies should never be "
        "used on any project and should be considered deprecated for existing "
        "projects.",
        rings["REJECT"], linkify=False)
    mdf.create_md_file()
"to what was measured in the metrics.json file supplied via -r") args = parser.parse_args() results = args.results config = args.config output = args.output measured_metrics = json.load(open(results, 'r')) sota_checkpoints_eval = json.load(open(config), object_pairs_hook=OrderedDict) # Output tables for per-sample README files for sample_type in sota_checkpoints_eval: table_rows = get_results_table_rows(sota_checkpoints_eval[sample_type], measured_metrics, table_format='per_sample') file_name = 'results_{}.md'.format(sample_type) mdfile = MdUtils(file_name=file_name) write_table_to_md_file(mdfile, table_rows) mdfile.create_md_file() # Somehow the MDUtils outputs 4 empty lines prior to the actual table in the target file. delete_four_head_lines(file_name) # Output the overview table for the top-level README file overview_file_name = 'results_overview.md' mdfile = MdUtils(file_name=overview_file_name) # Compose a mini-TOC mdfile.new_line("Quick jump to sample type:") mdfile.new_line("==========================") for sample_type in sota_checkpoints_eval: header_name = SAMPLE_TYPE_TO_SAMPLE_DISPLAY_NAME_DICT[sample_type]
comment_issue_ids = [] for node in info['data']['user']["issueComments"]['nodes']: repo = node["issue"]["repository"]["nameWithOwner"] cur_datetime = str2time(node["issue"]["createdAt"]) start_datetime = str2time(start_time) end_datetime = str2time(end_time) for target in target_list: if repo.find(target) != -1: if start_datetime <= cur_datetime <= end_datetime: if node['issue']['id'] not in comment_issue_ids: comment_issue_ids.append(node['issue']['id']) comment_issues.append(node) break mdFile = MdUtils(file_name='output', title='Weekly Report') if len(open_issues) > 0: mdFile.new_header(level=2, title='Open Issues', add_table_of_contents='n') mdFile.new_list( map( lambda x: 'Open Issue ' + mdFile.new_inline_link( link=x['issue']['url'], text=x['issue']['title']), open_issues)) if len(open_prs) > 0: mdFile.new_header(level=2, title='Open PRs', add_table_of_contents='n') mdFile.new_list( map( lambda x: 'Open PR ' + mdFile.new_inline_link( link=x['pullRequest']['url'], text=x['pullRequest']['title']),
def generateMdFile(cardInfo):
    """Render today's Trello snapshot to '<timestamp>-report.md'."""
    stamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    reportname = '%s-report' % (stamp)
    print('Generating %s...' % (reportname))
    mdFile = MdUtils(file_name=reportname, title='Today\'s Trello Report')
    # Intro blurb.
    mdFile.write(
        'Hey tech-team! Here’s a snapshot of what we’re working on based on our current Trello! '
    )
    mdFile.write(
        'If you have updates you want to give or are interested in following up with/joining some of this work,'
    )
    mdFile.write(
        'either ask here, message the folks on the task, or get in the Trello yourself and see what’s going on!'
    )
    mdFile.new_line()
    mdFile.new_line()
    for card in cardInfo:
        print('Creating entry for %s...' % (card['name']))
        # Bold card title, then optional "(tags)" and "[members]" suffixes.
        mdFile.write('**%s**' % (card['name']))
        if card['labels']:
            mdFile.write(' (' + ', '.join('%s' % label
                                          for label in card['labels']) + ')')
        if card['members']:
            mdFile.write(' [' + ', '.join('%s' % member
                                          for member in card['members']) + ']')
        if card['recentComment']:
            mdFile.write(': ')
            mdFile.write('%s' % card['recentComment'])
        else:
            mdFile.write(
                ': No further information (someone should add some comments/users)!'
            )
        # Blank space between cards.
        mdFile.new_line()
        mdFile.new_line()
    mdFile.create_md_file()
    print('Done!')
class generateMarkdown():
    """Builds CHANGELOG.md for a release range of the nano-node repository."""

    def __repr__(self):
        return "<generateMarkdown(mdFile={0})>".format(self.mdFile)

    def __init__(self, repo):
        # Header: release link plus a full-changelog compare link.
        self.mdFile = MdUtils(file_name='CHANGELOG', title='CHANGELOG')
        self.mdFile.new_line(
            "## **Release** " +
            "[{0}](https://github.com/nanocurrency/nano-node/tree/{0})".format(
                repo.end))
        self.mdFile.new_line("[Full Changelog](https://github.com/nanocurrency"
                             "/nano-node/compare/{0}...{1})".format(
                                 repo.start, repo.end))
        # PRs grouped into sections; breaking PRs come first in each section
        # (see pull_to_section).
        sort = self.pull_to_section(repo.commits)
        for section, prs in sort.items():
            self.write_header_PR(section)
            for pr in prs:
                # pr is [number, is_breaking]; full info looked up by number.
                self.write_PR(pr, repo.commits[pr[0]])
        if repo.other_commits:
            # Commits without an associated pull request.
            self.write_header_no_PR()
            for sha, message in repo.other_commits:
                self.write_no_PR(repo, sha, message)
        self.mdFile.create_md_file()

    def write_header_PR(self, section):
        # Section heading followed by a two-column markdown table header.
        self.mdFile.new_line("---")
        self.mdFile.new_header(level=3, title=section)
        self.mdFile.new_line("|Pull Request|Title")
        self.mdFile.new_line("|:-:|:--")

    def write_header_no_PR(self):
        # Heading for commits that did not come in via a pull request.
        self.mdFile.new_line("---")
        self.mdFile.new_header(level=3, title="Other Updates")
        self.mdFile.new_line("|Commit|Title")
        self.mdFile.new_line("|:-:|:--")

    def write_PR(self, pr, info):
        # One table row per PR; breaking changes get a bold prefix.
        imp = ""
        if pr[1]:
            imp = "**BREAKING** "
        self.mdFile.new_line("|[#{0}]({1})|{2}{3}".format(
            pr[0], info['Url'], imp, info['Title']))

    def write_no_PR(self, repo, sha, message):
        # One table row per PR-less commit, linking the short sha.
        url = "https://github.com/{0}/commit/{1}".format(repo.name, sha)
        self.mdFile.new_line("|[{0}]({1})|{2}".format(sha[:8], url, message))

    def handle_labels(self, labels):
        # Map a PR's labels to (section, is_breaking). Iterates SECTIONS in
        # order, so the first section containing any of the PR's labels wins;
        # a 'breaking' label anywhere marks the PR important.
        for section, values in SECTIONS.items():
            for label in labels:
                if label in values:
                    if any(string in labels for string in [
                            'breaking',
                    ]):
                        return section, True
                    else:
                        return section, False
        # No label matched any section.
        return 'Other', False

    def pull_to_section(self, commits):
        # Bucket each PR (by number) into its section; breaking PRs are
        # inserted at the front so they are listed first. Sections with no
        # PRs are dropped from the result.
        sect = copy.deepcopy(SECTIONS)
        result = {}
        for a in sect:
            sect[a] = []
        for pull, info in commits.items():
            section, important = self.handle_labels(info['labels'])
            if important:
                sect[section].insert(0, [pull, important])
            else:
                sect[section].append([pull, important])
        for a in sect:
            if len(sect[a]) > 0:
                result[a] = sect[a]
        return result
def write_requirements(self, root_dir='.', md_file_name=None, format='md'):
    """Write a REQUIREMENTS.md summary for the current tag.

    Returns the generated md file name, the converted html file name, or ''
    when *format* is unsupported. Raises NoAppropriateVersionFoundException
    when no file name is given and no current tag is set.
    """
    if not md_file_name:
        if self._current_tag:
            md_file_name = os.path.join(root_dir, self._current_tag, 'REQUIREMENTS.md')
        else:
            dev_or_stable = "dev" if self._dev else "stable"
            # NOTE(review): message renders as e.g. "for devrelease" -- a
            # space before 'release' looks intended; confirm before changing.
            raise NoAppropriateVersionFoundException("No suitable version for " + dev_or_stable + "release")
    os.makedirs(os.path.dirname(md_file_name), exist_ok=True)
    requirements_md = MdUtils(file_name=md_file_name, title="Requirements Summary")
    for req_topic in self._requirements:
        requirements_md.new_header(level=1, title=req_topic)
        for req in self._requirements[req_topic]:
            # A requirement is "impacted" when the current tag appears in
            # its entry of the requirements->tags map.
            impacted = self._current_tag in self._requirements_tag_map[req['number']]['tags'] if req['number'] in self._requirements_tag_map else False
            impacted_icon = ':boom:' if impacted else ''
            # NOTE(review): self._repo is interpolated into URLs here but
            # used as an object (.issue(), .description) below -- presumably
            # its __str__ yields "owner/name"; confirm.
            title = f"{req['title']} ([#{req['number']}](https://github.com/{self._repo}/issues/{req['number']})) {impacted_icon}"
            requirements_md.new_header(level=2, title=title)
            if impacted:
                # Group the impacting issues by type (bug / enhancement / ...).
                issue_lines = {t : [] for t in Requirements.ISSUE_TYPES}
                for n in self._requirements_tag_map[req['number']]['issues']:
                    issue = self._repo.issue(n)
                    bug_or_enhancement = Requirements._issue_is_bug_or_enhancement(issue)
                    issue_lines[bug_or_enhancement].append(f'{issue.title} ([#{n}](https://github.com/{self._repo}/issues/{n}))')
                for issue_type, issue_list in issue_lines.items():
                    if len(issue_lines[issue_type]):
                        requirements_md.new_paragraph(f'The {issue_type}s which impact this requirements are:')
                        requirements_md.new_list(issue_list)
            else:
                requirements_md.new_paragraph('This requirement is not impacted by the current version')
    requirements_md.create_md_file()
    if format == 'md':
        return md_file_name
    if format == 'html':
        html_file_name = md_file_name.replace('.md', '.html')
        return md_to_html(md_file_name, html_file_name, {'name': self._repo, 'description': self._repo.description, 'tag': self._current_tag})
    else:
        logger.error(f'output format {format} is not supported')
        return ''
self._clean_previous_dev_requirements(root_dir)
def _markdown(checklist):
    """Generate the PDP 2019 checklist markdown file.

    Args:
        checklist: path to the checklist JSON file.
    """
    # Fix: the original json.load(open(...)) leaked the file handle.
    with open(checklist) as checklist_file:
        checklist = json.load(checklist_file, object_pairs_hook=OrderedDict)
    mdFile = MdUtils(file_name='Ikigai-Checklist', title='PDP 2019 Checklist')
    mdFile.new_paragraph(checklist['overview'])
    # Sections are ordered by their numeric 'no' field.
    sections = sorted(checklist['checklist'].values(),
                      key=lambda s: int(s['no']))
    for section in sections:
        mdFile.new_header(level=1, title=section['title'])
        for subject in section['subjects'].values():
            mdFile.new_header(level=2, title=subject['title'])
            mdFile.new_paragraph(subject['description'])
            mdFile.new_paragraph("Reference: " + subject['provisions'])
            mdFile.new_paragraph("Functions: " + ", ".join(subject['functions']))
            mdFile.new_paragraph("Groups: " + ", ".join(subject['groups']))
            # Header row + one row per action; mdutils takes a flat cell list.
            actions = [['No', 'Description', 'Tags', 'Check']]
            actions += [[a['no'], a['description'], ", ".join(a['tags']), '']
                        for a in subject['actions'].values()]
            rows = len(actions)
            actions = flatten(actions)
            mdFile.new_table(columns=4, rows=rows, text=actions,
                             text_align='left')
    mdFile.create_md_file()
def create_notebook(name, title):
    """Start a notebook-style markdown file: a '#%% md' cell marker followed
    by an H1 title. Returns the open MdUtils document."""
    notebook = MdUtils(file_name=name)
    notebook.write("#%% md\n")
    notebook.write("# " + title + "\n")
    return notebook
def generate_report():
    """Build report.md for the classification lab: proximity assessment of
    PHRASE rendered at two font sizes."""

    def generate_phrase_image(width: int, font: ImageFont) -> Image:
        # Render PHRASE black-on-white in grayscale, then trim empty margins.
        result = Image.new(mode=GRAYSCALE_MODE,
                           size=(width, font.size),
                           color=WHITE)
        result_draw = ImageDraw.Draw(im=result, mode=GRAYSCALE_MODE)
        result_draw.text(xy=(0, 0), text=PHRASE, font=font, fill=0, anchor='lt')
        return cut_empty_rows_and_cols(result)

    def p_ass_for_table(p_ass):
        # Flatten the 2-D assessment matrix into the flat string list that
        # mdutils' new_table expects.
        result = []
        for row in p_ass:
            for element in row:
                result.append(str(element))
        return result

    report = MdUtils(file_name=f'./report.md')
    report.new_header(level=1, title='Классификация')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')
    report.new_line(text=f'Алфавит - {ALPHABET}')
    report.new_line(text=f'Исходная фраза - {PHRASE}')
    report.new_line(text=f'Размер шрифта - {FONT_SIZE}')
    # Phrase rendered at the normal and the small font size.
    phrase_image = generate_phrase_image(
        1200, ImageFont.truetype(font=FONT_PATH, size=FONT_SIZE))
    phrase_image_small_font = generate_phrase_image(
        1200, ImageFont.truetype(font=FONT_PATH, size=SMALL_FONT_SIZE))
    # Proximity assessment for each rendering.
    p_assessment = proximity_assessment(image=phrase_image,
                                        diff_threshold=SYMBOLS_DIFF_THRESHOLD,
                                        phrase=PHRASE)
    p_assessment_small_font = proximity_assessment(
        image=phrase_image_small_font,
        diff_threshold=SYMBOLS_DIFF_THRESHOLD,
        phrase=PHRASE)
    p_assessment_for_table = p_ass_for_table(p_assessment)
    p_assessment_small_font_for_table = p_ass_for_table(
        p_assessment_small_font)
    # Report: one table per font size.
    report.new_header(level=2,
                      title=f'Оценка близости для размера шрифта {FONT_SIZE}')
    rows = len(p_assessment)
    columns = len(p_assessment[0]) if len(p_assessment) > 0 else 0
    report.new_table(columns=columns, rows=rows, text=p_assessment_for_table)
    report.new_header(
        level=2, title=f'Оценка близости для размера шрифта {SMALL_FONT_SIZE}')
    rows = len(p_assessment_small_font)
    columns = len(
        p_assessment_small_font[0]) if len(p_assessment_small_font) > 0 else 0
    report.new_table(columns=columns,
                     rows=rows,
                     text=p_assessment_small_font_for_table)
    report.new_line(
        text=
        'Так как были использованы нормализованные параметры для оценки близости символов, размер шрифта не влияет на результат'
    )
    report.create_md_file()
def generate_report():
    """Build report.md for the contrasting lab: grayscale, linear contrast,
    and power-transform (gamma) variants of each input image."""
    report = MdUtils(file_name=f'./report.md')
    report.new_header(level=1, title='Контрастирование')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')
    for im in IMAGES:
        image_path = f'{helpers.folder_helper.IMAGES_FOLDER_PATH}/{im}'
        processed_image_folder_path = f'{image_path}_processed'
        os.makedirs(processed_image_folder_path, exist_ok=True)
        grayscaled_image_path = f'{processed_image_folder_path}/{im}_grayscaled.png'
        linear_contrasted_image_path = f'{processed_image_folder_path}/{im}_linear_contrasted.png'
        # '#' is a placeholder replaced by the gamma value below.
        power_transformed_image_path = f'{processed_image_folder_path}/{im}_power_transformed_gamma=#.png'
        image = Image.open(fp=image_path).convert(constants.RGB_MODE)
        grayscaled = grayscale.mean_grayscale(image)
        grayscaled.save(grayscaled_image_path)
        # Contrasting
        enhancement.contrasting.linear_contrasting(grayscaled).save(
            linear_contrasted_image_path)
        # One power-transformed image per gamma in [START_GAMMA, STOP_GAMMA).
        for gamma in numpy.arange(START_GAMMA, STOP_GAMMA, STEP_GAMMA):
            gamma = round(gamma, GAMMA_ROUND_SIGNS)
            enhancement.contrasting.power_transformation(grayscaled, gamma=gamma)\
                .save(power_transformed_image_path.replace('#', str(gamma)))
        # Report: embed the original and each processed variant.
        report.new_header(level=2, title=f'{im}')
        report.new_header(level=3, title='Исходная картинка')
        report.new_line(
            report.new_inline_image(text='Исходная картинка', path=image_path))
        report.new_header(level=3, title='Оттенки серого')
        report.new_line(
            report.new_inline_image(text='Оттенки серого',
                                    path=grayscaled_image_path))
        report.new_header(level=3, title='Линейное контрастирование')
        report.new_line(
            report.new_inline_image(text='Линейное контрастирование',
                                    path=linear_contrasted_image_path))
        report.new_header(level=3, title='Степенное преобразование')
        # Same gamma sequence as above so paths line up with saved files.
        for gamma in numpy.arange(START_GAMMA, STOP_GAMMA, STEP_GAMMA):
            gamma = round(gamma, GAMMA_ROUND_SIGNS)
            report.new_header(level=4, title=f'Gamma = {gamma}')
            report.new_line(
                report.new_inline_image(
                    text='Степенное преобразование',
                    path=power_transformed_image_path.replace('#', str(gamma))))
    report.create_md_file()
def create_markdown(github, owner):
    """ create markdown for repos in github dict """
    filename = 'prepbadge'
    # Bug fix: the progress message previously printed a literal placeholder
    # instead of the actual output file name.
    print(f'Creating markdown file for {owner} repos in {filename}.md')
    md = MdUtils(file_name=filename, title='EdgeXFoundry Repo Badges Preview')
    for repo in github[0]['result']:
        # One linked H1 per repo, followed by its badges on one line.
        md.new_header(level=1,
                      title=md.new_inline_link(link=repo['github_url'],
                                               text=repo['name']))
        for badge in repo['badges']:
            md.write(f'{badge} ')
        md.new_line('')
    md.create_md_file()
def run(self):
    # Build a markdown report for a TheHive case and attach it to the case
    # as a new task log, then delete the temporary file.
    Responder.run(self)
    caseNumber = self.get_param('data.caseId')  #Friendly case number
    caseId = self.get_param('data.id')  #Raw case number
    case_observables = self.api.get_case_observables(caseId).json()
    title = self.get_param('data.title', None, 'title is missing')
    # NOTE(review): description and tags are fetched but never used below
    # (the description is re-read from data['description']); confirm intent.
    description = self.get_param('data.description', None,
                                 'description is missing')
    tags = self.get_param('data.tags')
    data = self.get_param('data')
    tlp = self.getTLP(data['tlp'])  # tlp[0] = label, tlp[1] = banner text
    # Title
    #mdFile = MdUtils(file_name=str(caseNumber),title=tlp[0] + ' Case #' + str(caseNumber) + ': ' + title)
    mdFile = MdUtils(file_name=str(self.tmpPath) + str(caseNumber),
                     title=tlp[0] + ' Case #' + str(caseNumber) + ': ' + title)
    # Case Summary: 2-column table from flat key/value list.
    caseSummary = self.getCaseSummary(data)
    mdFile.new_header(level=1, title='Case Summary')
    mdFile.new_line(str(tlp[1]))
    mdFile.new_table(columns=2,
                     rows=int(caseSummary.__len__() / 2),
                     text=caseSummary,
                     text_align='left')
    # Case Description (page break before it for PDF export).
    mdFile.new_line('<div style="page-break-after: always;"></div>')
    mdFile.new_line(' ')
    mdFile.new_header(level=1, title='Case Description')
    mdFile.new_line(str(data['description']))
    mdFile.new_line(' ')
    # Task Log: tasks ordered by creation time.
    allTaskIds = self.getCaseTasks(caseId)
    allTaskIds_sorted = sorted(allTaskIds.items(),
                               key=lambda x: x[1]['createdAt'])
    mdFile.new_header(level=1, title='Task Log Entries')
    for task in allTaskIds_sorted:
        # NOTE(review): ' \: ' is an invalid escape sequence (it renders as
        # a literal backslash-colon) -- confirm the backslash is intended.
        title = str(task[1]['taskGroup'] + ' \: ' + task[1]['taskTitle'])
        createdAt = time.strftime(
            '%Y-%m-%dT%H:%M:%SZ',
            time.localtime(
                task[1]['createdAt'] /
                1000))  #Convert epoch ms to sec then human readable
        mdFile.new_header(level=2, title=title)
        mdFile.new_line(str('**Created At:** ') + str(createdAt))
        mdFile.new_line(str('**Created By:** ') + str(task[1]['createdBy']))
        mdFile.new_line(str('**Assigned To:** ') + str(task[1]['owner']))
        mdFile.new_line(str('**Case Status:** ') + str(task[1]['status']))
        mdFile.new_line(' ')
        mdFile.new_line(str('**Description:** '))
        mdFile.new_line(str(task[1]['description']))
        mdFile.new_line(' ')
        # Per-task log entries, ordered by creation time.
        caseTaskLog = self.getCaseTaskLog(task[0])
        caseTaskLogEntries = (json.loads(caseTaskLog))
        caseTaskLogEntries_sorted = sorted(caseTaskLogEntries,
                                           key=lambda k: k['createdAt'])
        for caseTaskLogEntry in caseTaskLogEntries_sorted:
            createdAt = time.strftime(
                '%Y-%m-%dT%H:%M:%SZ',
                time.localtime(
                    caseTaskLogEntry['createdAt'] /
                    1000))  #Convert epoch ms to sec then human readable
            mdFile.new_line(
                str(createdAt) + ' : ' + str(caseTaskLogEntry['message']))
    # Case Observables: 6-column table from flat cell list.
    mdFile.new_header(level=1, title='Case Observables')
    caseObservables = self.getCaseObservables(case_observables)
    mdFile.new_table(columns=6,
                     rows=int(caseObservables.__len__() / 6),
                     text=caseObservables,
                     text_align='left')
    # TLP Protocol description
    mdFile.new_line('<div style="page-break-after: always;"></div>')
    mdFile.new_line(' ')
    mdFile.new_header(
        level=1, title='Traffic Light Protocol (TLP) Definitions and Usage')
    tlpFooter = self.getTlpFooter()
    mdFile.new_table(columns=3, rows=5, text=tlpFooter, text_align='left')
    # Build TOC
    mdFile.new_table_of_contents(table_title='Table of Contents', depth=2)
    # Compile the report
    mdFile.create_md_file()
    # Add the report to the case
    addTask = json.loads(self.addTask(caseId))
    taskId = addTask['_id']
    # Add the MD file to the task
    addTaskLog = json.loads(
        self.addTaskLog(taskId,
                        str(self.tmpPath) + str(caseNumber) + '.md'))
    # Cleanup the MD file
    os.remove(str(self.tmpPath) + str(caseNumber) + '.md')
    self.report({'report': 'created'})
def to_markdown(self):
    """Generate a markdown description of the feature set pipeline.

    Writes the file when self.save is truthy; otherwise returns the
    document text.
    """
    params = self._construct()
    markdown = MdUtils(file_name=params._name)
    markdown.new_header(level=1, title=str(params._name).capitalize())
    markdown.new_header(level=2, title="Description")
    markdown.new_paragraph(params._desc_feature_set)
    markdown.new_line()
    markdown.new_header(level=2, title="Feature Set Pipeline")
    # Source table: Reader | Location.
    markdown.new_header(level=3, title="Source")
    source_cells = ["Reader", "Location"]
    for reader, location in params._source:
        source_cells += [reader, location]
    markdown.new_table(columns=2,
                       rows=len(source_cells) // 2,
                       text=source_cells,
                       text_align="center")
    # Sink table: single Writer column.
    markdown.new_header(level=3, title="Sink")
    sink_cells = ["Writer"] + [writer for writer in params._sink]
    markdown.new_table(columns=1,
                       rows=len(sink_cells),
                       text=sink_cells,
                       text_align="center")
    # Features table: Column name | Data type | Description.
    markdown.new_header(level=3, title="Features")
    feature_cells = ["Column name", "Data type", "Description"]
    for column, description in params._features:
        feature_cells += [column["column_name"], str(column["type"]), description]
    markdown.new_table(columns=3,
                       rows=len(feature_cells) // 3,
                       text=feature_cells,
                       text_align="center")
    if self.save:
        markdown.create_md_file()
    else:
        return markdown.file_data_text
def main(argv):
    """Generate per-sample and overview markdown result tables.

    Reads measured metrics (``-r``) and the checkpoint definition config
    (``-c``), writes ``results_<sample>.md`` per sample type and a
    combined ``results_overview.md``.  With ``-o``, also rewrites the
    config with target metrics updated to the measured values.

    Args:
        argv: command-line argument list (without the program name).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--results',
        '-r',
        help='A metrics.json file from a latest checkpoint evaluation run')
    parser.add_argument(
        '--config',
        '-c',
        help=
        'A .json file with definitions of tested checkpoints (sota_checkpoints_eval.json)'
    )
    parser.add_argument(
        '--output',
        '-o',
        help=
        "If specified, will output a config file specified in -c with target metric values updated "
        "to what was measured in the metrics.json file supplied via -r")
    args = parser.parse_args(args=argv)

    # Use context managers so the JSON files are closed deterministically
    # (the originals leaked the handles via json.load(open(...))).
    with open(args.results, 'r') as results_file:
        measured_metrics = json.load(results_file)
    # OrderedDict preserves the config's section order on re-serialization.
    with open(args.config, 'r') as config_file:
        sota_checkpoints_eval = json.load(config_file,
                                          object_pairs_hook=OrderedDict)

    # Output tables for per-sample README files
    for sample_type in sota_checkpoints_eval:
        table_rows = get_results_table_rows(sota_checkpoints_eval[sample_type],
                                            measured_metrics,
                                            sample_type,
                                            table_format='per_sample')
        file_name = f'results_{sample_type}.md'
        mdfile = MdUtils(file_name=file_name)
        write_table_to_md_file(mdfile, table_rows)
        mdfile.create_md_file()
        # Somehow the MDUtils outputs 4 empty lines prior to the actual
        # table in the target file.
        delete_four_head_lines(file_name)

    # Output the overview table for the top-level README file
    overview_file_name = 'results_overview.md'
    mdfile = MdUtils(file_name=overview_file_name)
    # Compose a mini-TOC
    mdfile.new_line('### PyTorch models')
    mdfile.new_line()
    for sample_type in sota_checkpoints_eval:
        mdfile.new_line('<a name = "pytorch_object_detection" > </a>')
        mdfile.new_line()
        mdfile.new_line(
            f'#### {SAMPLE_TYPE_TO_SAMPLE_DISPLAY_NAME_DICT[sample_type]}')
        mdfile.new_line()
        table_rows = get_results_table_rows(sota_checkpoints_eval[sample_type],
                                            measured_metrics,
                                            sample_type,
                                            table_format='overview')
        write_table_to_md_file(mdfile, table_rows)
    mdfile.create_md_file()
    delete_four_head_lines(overview_file_name)

    if args.output is not None:
        update_target_metrics_and_thresholds(sota_checkpoints_eval,
                                             measured_metrics)
        with open(args.output, "w") as write_file:
            json.dump(sota_checkpoints_eval, write_file, indent=4)
def generate_report():
    """Build ``report.md`` with spatial-smoothing results for each image.

    For every image in IMAGES: creates a ``<name>_processed`` folder,
    saves the smoothed image and the smoothed-vs-original difference,
    and appends a section with all three pictures to the report.
    """
    md = MdUtils(file_name='./report.md')
    md.new_header(level=1, title='Фильтры и морфология')
    md.new_line(text='Выполнил Ахманов Алексей Б18-514')

    for image_name in IMAGES:
        source_path = f'{folder_helper.IMAGES_FOLDER_PATH}/{image_name}'
        output_dir = f'{source_path}_processed'
        os.makedirs(output_dir, exist_ok=True)

        rgb_image = Image.open(source_path).convert(constants.RGB_MODE)

        # Smoothing: grayscale first, then write both derived images.
        smoothed_path = f'{output_dir}/{image_name}_spatial_smoothed.png'
        difference_path = (
            f'{output_dir}/{image_name}_spatial_smoothed_difference.png')
        gray = grayscale.mean_grayscale(rgb_image)
        filtration.spatial_smoothing(gray).save(smoothed_path)
        filtration.spatial_smoothing_difference(gray).save(difference_path)

        # Per-image report section.
        md.new_header(level=2, title=f'{image_name}')
        md.new_header(level=3, title='Исходная картинка')
        md.new_line(
            md.new_inline_image(text='Исходная картинка', path=source_path))
        md.new_header(level=3, title='Пространственное сглаживание')
        md.new_line(
            md.new_inline_image(text='Пространственное сглаживание',
                                path=smoothed_path))
        md.new_header(level=3, title='Разница сглаженной и исходной')
        md.new_line(
            md.new_inline_image(text='Разница сглаженной и исходной',
                                path=difference_path))

    md.create_md_file()
def report(fails: Dict[str, List[Fail]]):
    """Write an OCR recognition report to ``report.md``.

    Each fail type gets a section with its explanation, a count, and
    either an aggregated 4-column table (for ``LenFail``) or per-fail
    markdown produced by the fail object itself.

    Args:
        fails: mapping from fail type to the recorded fails of that type.
            NOTE(review): the code reads ``fail_typ.title`` /
            ``.explanation`` and compares keys against the ``LenFail``
            class, so keys appear to be fail *classes*, not strings —
            the ``Dict[str, ...]`` annotation looks stale; confirm with
            callers before changing it.
    """
    print('Start writing report')
    start_time = time.time()
    mdFile = MdUtils(file_name='report', title='OCR Recognition Report')
    # items() binds each fail list once instead of re-indexing fails[...]
    # on every use.
    for fail_typ, type_fails in fails.items():
        mdFile.new_header(level=2,
                          title=fail_typ.title,
                          add_table_of_contents='n')
        mdFile.new_header(level=3,
                          title="Explanation",
                          add_table_of_contents='n')
        mdFile.new_paragraph(fail_typ.explanation)
        mdFile.new_line(
            f'There were in total {len(type_fails)} of {fail_typ.title}')
        mdFile.new_header(level=3, title="Fails", add_table_of_contents='n')
        mdFile.new_line()
        if fail_typ == LenFail:
            # Length fails are aggregated into one table: header row plus
            # four cells per fail.
            md_text: List[str] = [
                'Font name - size', 'Words in PDF', 'Recognized words',
                'Levenshtein distance of total text'
            ]
            for len_fail in type_fails:
                md_text.extend([
                    f'{len_fail.font_name} - {len_fail.font_size}',
                    str(len_fail.pdf_len),
                    str(len_fail.ocr_len),
                    str(len_fail.total_levenshtein)
                ])
            # Integer division: md_text always holds a multiple of 4 cells.
            mdFile.new_table(columns=4,
                             rows=len(md_text) // 4,
                             text=md_text,
                             text_align='center')
        else:
            # Other fail types render themselves into the document.
            for fail in type_fails:
                mdFile = fail.to_md(mdFile)
    mdFile.create_md_file()
    print(f'Finished reporting after {time.time() - start_time} sec')
OUTPUT_PATH = Path(args.output).resolve() # %% parser = BibTexParser(common_strings=True) with open(BIBTEX_PATH) as bibtex_file: bib_database = bibtexparser.load(bibtex_file, parser=parser) # %% md = frontmatter.load(FORMAT_PATH) FORMAT = md.content GROUPBY = md.metadata.get('groupBy') SORTBY = md.metadata.get('sortBy') # %% entries = bib_database.entries entries.sort(key=lambda x: x.get(GROUPBY, ''), reverse=True) #%% md_file = MdUtils(file_name=OUTPUT_PATH.stem, title=OUTPUT_PATH.stem) # %% def format_entry(md_file: MdUtils, entry: Dict): assert entry.get('title') formatted = FORMAT.format(TITLE=entry.get('title'), AUTHOR=entry.get('author', '').replace(' and', ','), YEAR=entry.get('year', ''), JOURNAL=entry.get('journal', '')) md_file.new_line(formatted) #%% for key, group in groupby(entries, lambda x: x.get(GROUPBY, '')):