def write_pairs_to_md_file(self, file_path=None):
    """Write the generated weekly pairings to a markdown file.

    :param file_path: destination path; defaults to the ``pairs_md_file``
        entry of the loaded config.
    """
    if file_path is None:
        file_path = self.config["pairs_md_file"]
    # Emoji shortcodes sprinkled next to names for a friendlier report.
    md_emoji = (
        ":coffee:", ":tea:", ":beer:", ":cocktail:", ":tropical_drink:",
        ":wine_glass:", ":cake:", ":cookie:", ":croissant:", ":pancakes:",
        ":pretzel:", ":doughnut:", ":pie:", ":cup_with_straw:",
    )
    md_file = MdUtils(file_name=file_path, title="Robogals Coffee Roulette")
    for week in range(self.weeks):
        md_file.new_paragraph(f"*Week {week + 1}:*")
        for pair in self.pairings[week]:
            # Build the base two-person line, then extend for triples.
            line = (f"*{pair.person_1.name}* {choice(md_emoji)} is paired with "
                    f"*{pair.person_2.name}* {choice(md_emoji)}")
            if pair.person_3 is not None:
                line += f" and *{pair.person_3.name}* {choice(md_emoji)}"
            md_file.new_line(line)
        md_file.new_paragraph()
    md_file.create_md_file()
def generate(self):
    """Render markdown docs: a tool README plus one page per action."""

    def _emit_tool_args_and_options(doc_writer):
        # Tool-level arguments and options are repeated on every page.
        doc_writer.write_arguments(f'command `{self.tool_command}`',
                                   self.tool_optional_args,
                                   self.tool_required_args)
        doc_writer.write_options(self.tool_options)

    # docs/<tool-name>/README.md
    os.makedirs(self.tool_prefix, exist_ok=True)
    md = MdUtils(file_name=f'{self.tool_prefix}/README', title=self.tool_command)
    writer = Writer(md)
    writer.write_description(self.tool.__doc__)
    writer.write_tool_command_usage(self)
    _emit_tool_args_and_options(writer)
    writer.write_actions_table(self.tool_serialized_actions)
    md.create_md_file()

    # docs/<tool-name>/<action-name>.md
    for action in self.tool_serialized_actions:
        md = MdUtils(file_name=f'{self.tool_prefix}/{action.action_name}',
                     title=action.action_name)
        writer = Writer(md)
        writer.write_description(action.description)
        writer.write_action_command_usage(self, action)
        writer.write_arguments(f'action `{action.action_name}`',
                              action.optional_args, action.required_args)
        _emit_tool_args_and_options(writer)
        md.create_md_file()
def createMarkdownFile(pz_releases, issues, markdownFileName):
    """Build a release-summary markdown file plus an HTML rendering of it.

    :param pz_releases: mapping of release keys to indexable release records
        (index 1 = release name, index 2 = release date — assumed, TODO confirm)
    :param issues: iterable of Jira issues; customfield_10414[0] is matched
        against the release name to group issues
    :param markdownFileName: base file name (without extension)
    """
    md_title = "PPG Services Release Summary ({}, {})".format(dayOfWeek, date_str)
    mdFile = MdUtils(file_name=markdownFileName, title=md_title)

    # Newest releases first; one header per release with its issues below.
    for release_key in sorted(pz_releases.keys(), reverse=True):
        release = pz_releases.get(release_key)
        mdFile.new_line()
        mdFile.new_line()
        mdFile.new_header(level=1,
                          title='Release {} ({}):'.format(release[1], release[2]))
        for issue in issues:
            if issue.fields.customfield_10414[0] == release[1]:
                mdFile.new_line('{}: {}.\n'.format(issue.key, issue.fields.summary))
        mdFile.new_line()

    # Write to markdown document
    mdFile.create_md_file()

    # Render the markdown into a standalone HTML file.
    with open('{}.md'.format(markdownFileName), "r", encoding="utf-8") as input_file:
        html = markdown.markdown(input_file.read())
    with open('{}.html'.format(markdownFileName), "w", encoding="utf-8",
              errors="xmlcharrefreplace") as output_file:
        output_file.write(html)
def test_references_placed_in_markdown_file(self):
    """Reference links must be collected and appended at the end of the file."""
    md_file = MdUtils(file_name="Test_file", title="")
    text = "mdutils library"
    reference_tag = "mdutils library"
    link = "https://github.com/didix21/mdutils"
    expected_value = "\n\n\n[mdutils library0][mdutils library0]\n" \
                     "[mdutils library1][mdutils library1]\n" \
                     "[mdutils library2][mdutils library2]\n" \
                     "[mdutils library3][mdutils library3]\n" \
                     "\n\n\n" \
                     "[mdutils library0]: https://github.com/didix21/mdutils0\n" \
                     "[mdutils library1]: https://github.com/didix21/mdutils1\n" \
                     "[mdutils library2]: https://github.com/didix21/mdutils2\n" \
                     "[mdutils library3]: https://github.com/didix21/mdutils3\n"

    # Write four numbered reference links, one per line.
    for index in range(4):
        suffix = str(index)
        md_file.write(md_file.new_reference_link(link=link + suffix,
                                                 text=text + suffix,
                                                 reference_tag=reference_tag + suffix))
        md_file.write('\n')

    md_file.create_md_file()
    self.assertEqual(expected_value, MarkDownFile.read_file('Test_file.md'))
def report(self, reports):
    """Write a markdown report for an nmap XML scan.

    :param reports: unused here; kept for interface compatibility
    :return: dict with the list of open ports found
    """
    # Round-trip XML -> dict -> JSON -> dict to normalize to plain types.
    xml_dict = parsexmlfile(self.files[0])
    nmap_results = json.loads(json.dumps(xml_dict))
    ports = nmap_results["nmaprun"]["host"]["ports"]

    # cpe, portid, product, name, version, hosts/up
    open_ports = ports["port"] if 'port' in ports else []  # temp

    self.logger.info("Creating report for " + self.name)
    outfile = f"{self.reportdir}/{self.name}.md"
    title = f"PENSEC - {self.name.capitalize()} Report"
    reportfile = MdUtils(file_name=outfile, title=title)
    reportfile.new_header(level=1, title="Common Statistics")
    reportfile.new_paragraph(f"{len(open_ports)} open ports\n")
    if len(open_ports) > 0:
        # list with open ports, cpe, etc
        reportfile.new_header(level=2, title="Open Ports")
    reportfile.create_md_file()
    self.logger.info("Report saved in " + outfile)
    return {"open_ports": open_ports}
def py_to_md(data, savepath, githubpath=None):
    """
    Writes to a markdown file the content of a .py file. It writes the name
    of all the classes and their methods with the corresponding docstrings
    as well as all functions that are not class methods.

    :param data: dictionary of classes that belong to a .py, from parse_pyfile
    :param savepath: str, path to the .md file to save
    :param githubpath: str, optional. URL to the same .py on github
    """
    print(f"writing - {savepath}")
    md = MdUtils(file_name=savepath)
    for entry in data.values():
        # Dispatch on whether the parsed object is a class or a bare function.
        renderer = add_class_to_md if entry["isclass"] else add_func_to_md
        renderer(md, entry, githubpath=githubpath)
    md.new_table_of_contents(table_title="Contents", depth=2)
    md.create_md_file()
def generateReadme():
    """Build LOCAL_README.md from the resources collection and return its text.

    Each document returned by ``db.resources.find()`` is expected to look like:
        {'domain': <domain name>,
         'links': [{'info': <link-info>, 'link': <url>}, ...]}
    One level-2 header is emitted per domain with a paragraph per link.
    """
    mdFile = MdUtils(file_name="LOCAL_README.md")
    mdFile.new_header(level=1, title="Compilation Of DSC-RAIT resources")
    mdFile.new_paragraph("This is a compiled list of resources shared on the DSC-RAIT Discord Server!")

    for domain_doc in db.resources.find():
        mdFile.new_header(level=2, title=domain_doc['domain'])
        for link_doc in domain_doc['links']:
            mdFile.new_paragraph(text=f"{link_doc['info']}: {link_doc['link']}")

    mdFile.create_md_file()
    # Read the created file back and hand its contents to the caller.
    return mdFile.read_md_file(file_name="LOCAL_README.md")
def generate(self):
    """Serialize tool metadata, then render README + per-action markdown pages."""
    class_args_serializer = ArgumentsSerializer(self.tool.CLASS_ARGUMENTS).serialize()
    self.tool_optional_args = class_args_serializer.optional_args
    self.tool_serialized_actions = self._serialize_actions()
    self.tool_options = self._serialize_default_options()

    # docs/<tool-name>/README.md
    os.makedirs(self.tool_prefix, exist_ok=True)
    readme = MdUtils(file_name=f'{self.tool_prefix}/README', title=self.tool_command)
    readme_writer = Writer(readme)
    readme_writer.write_description(self.tool.__doc__)
    readme_writer.write_tool_command_usage(self)
    readme_writer.write_arguments(f'command `{self.tool_command}`', self.tool_optional_args, [])
    readme_writer.write_options(self.tool_options)
    readme_writer.write_actions_table(self.tool_serialized_actions)
    readme.create_md_file()

    # docs/<tool-name>/<action-name>.md
    for action in self.tool_serialized_actions:
        page = MdUtils(file_name=f'{self.tool_prefix}/{action.action_name}',
                       title=action.action_name)
        page_writer = Writer(page)
        page_writer.write_description(action.description)
        page_writer.write_action_command_usage(self, action)
        page_writer.write_arguments(f'action `{action.action_name}`',
                                    action.optional_args, action.required_args)
        # Tool-level arguments/options are repeated on every action page.
        page_writer.write_arguments(f'command `{self.tool_command}`',
                                    self.tool_optional_args, [])
        page_writer.write_options(self.tool_options)
        page.create_md_file()
def generateShapeWorksCommandDocumentation(mdFilename = '../../docs/tools/ShapeWorksCommands.md', add_toc = False):
    """Generate the markdown reference page for the `shapeworks` CLI.

    :param mdFilename: output markdown path
    :param add_toc: when True, insert a table of contents after the intro
    """
    # settings from Executable.cpp
    opt_width = 32
    indent = 2
    spacedelim = ''.ljust(indent)

    mdFile = MdUtils(file_name=mdFilename, title='')
    mdFile.new_header(level=1, title='ShapeWorks Commands')

    # Intro paragraph assembled from fixed fragments (mkdocs admonitions included).
    intro_paragraph = ''.join([
        "`shapeworks` is a single executable for ShapeWorks with a set of sub-executables (commands) that are flexible, modular, loosely coupled, and standardized subcommands, with interactive help to perform individual operations needed for a typical shape modeling workflow that includes the Groom, Optimize, and Analyze phases.\n",
        '!!! danger "Activate shapeworks environment"\n',
        "\t Each time you use ShapeWorks from the command line, you must first activate its environment using the `conda activate shapeworks` command on the terminal. \n",
        '!!! danger "Add shapeworks to your path"\n',
        "\t Please make sure that `shapeworks` is in your path. See [Adding to PATH Environment Variable](../dev/paths.md). \n",
    ])
    mdFile.new_paragraph(intro_paragraph)

    if add_toc:
        # mark the after-intro to add table of contents after the introduction paragraph
        intro_marker = mdFile.create_marker(" ")

    cmd = "shapeworks"
    CommandsUtils.addCommand(mdFile, cmd, level=2, spacedelim=spacedelim, verbose=True)

    if add_toc:
        mdFile.new_table_of_contents(table_title='Table of Contents', depth=3, marker=intro_marker)

    mdFile.create_md_file()
def onboard(self, args):
    """Emit an example onboarding checklist markdown file.

    Only acts when *args* carries a ``checklist`` attribute; otherwise a no-op.
    """
    if not hasattr(args, 'checklist'):
        return
    mdFile = MdUtils(file_name='Example_Markdown', title='Markdown File Example')
    mdFile.new_checkbox_list(["engineering handbook", "complete corporate training"])
    mdFile.create_md_file()
def test_new_checkbox_checked_list(self):
    """Checked checkbox lists render every '-' bullet as '- [x]'."""
    md_file = MdUtils(file_name="Test_file", title="")
    md_file.new_checkbox_list(self.complex_items, checked=True)
    md_file.create_md_file()
    expected = self.expected_list.replace('-', '- [x]')
    actual = MarkDownFile.read_file('Test_file.md')
    self.assertEqual(expected, actual)
def test_new_list(self):
    """A plain list must be written to the file exactly as expected."""
    md_file = MdUtils(file_name="Test_file", title="")
    md_file.new_list(self.complex_items)
    md_file.create_md_file()
    actual = MarkDownFile.read_file('Test_file.md')
    self.assertEqual(self.expected_list, actual)
def segment_images(self, images: list[str]) -> list[np.ndarray]:
    """Segment every image in *images*, writing a Results.md report.

    :param images: image file locations to process
    :return: array of generated masks, one per input image
    """
    # Markdown report; table cells are accumulated on the instance by
    # the per-image segmentation step (assumed — TODO confirm __segment
    # appends to self.table_contents).
    markdown_file = MdUtils(file_name="Results",
                            title="Pratical Work - Lung Segmentation")

    # Header row of the results table.
    self.table_contents = [
        "Original Image (cropped to ROI)",
        "Generated Mask",
    ]

    progress = tqdm(images)
    segmented = []
    for img_location in progress:
        progress.set_description(f"Segmenting image '{img_location}'")
        segmented.append(self.__segment(img_location))

    # One header row plus one row per image, two columns each.
    markdown_file.new_table(
        columns=2,
        rows=len(images) + 1,
        text=self.table_contents,
        text_align="center",
    )
    markdown_file.create_md_file()
    return np.array(segmented)
def main():
    """Aggregate unlinked references from a database folder into a markdown page."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'db',
        type=str,
        help=
        'add the name of the database folder (NOTE: the folder must be in the same directory as the prorgram file'
    )
    args = parser.parse_args()

    page_titles = get_pages(args.db)

    # For every page, collect the pages that reference it; skip pages
    # with no references at all.
    ref_texts = {}
    for title in page_titles:
        refs = find_refs(args.db, title)
        if refs != []:
            ref_texts[title] = refs

    # One bullet per referenced page, referrers nested underneath.
    refs_page = ''
    for referenced, referrers in ref_texts.items():
        refs_page += ''' - [[{}]] was referenced on: \n'''.format(referenced)
        for referrer in referrers:
            refs_page += ''' \t - [[{}]] \n'''.format(referrer)

    mdFile = MdUtils(file_name='Aggregated Unlinked References')
    mdFile.write(refs_page)
    mdFile.create_md_file()
def generate_report():
    """Build a mask-usage prediction report as report.md / report.html.

    Renders a pie chart of per-label time to predictions.png, writes a short
    markdown report embedding it, then emits an HTML version with the result
    tables appended.
    """
    df_final = predictionsPerSecond()

    # Pie chart of time spent per predicted label.
    explode = (0.1, 0.1, 0.1)
    plt.pie(df_final.Label.value_counts(),
            explode=explode,
            labels=df_final.Label.value_counts().index.to_list(),
            autopct='%1.1f%%',
            shadow=True,
            startangle=90)
    plt.title('Tiempo de uso de mascarilla')
    plt.savefig('predictions.png')

    mdFile = MdUtils(
        file_name='report',
        title='Reporte de predicción de uso correcto de mascarilla')
    mdFile.new_line(
        mdFile.new_inline_image(text='Predicciones', path='predictions.png'))
    mdFile.new_header(title='Tablas de resultados', level=1)
    mdFile.new_line('Juan Pablo Carranza Hurtado')
    mdFile.new_line('José Alberto Ligorría Taracena')
    mdFile.create_md_file()

    # FIX: use a context manager so report.html is closed even if a write
    # raises (the original open()/close() pair leaked the handle on error).
    with open("report.html", "w") as f:
        f.write(markdown2.markdown_path('report.md'))
        f.write(pd.crosstab(df_final.Time, df_final.Label).to_html())
        f.write('<h1> Cantidad de segundos de utilización de mascarilla </h1>')
        f.write(pd.DataFrame(df_final.Label.value_counts()).to_html())
def test_new_table_of_contents(self):
    """A TOC must list only headers up to the requested depth, and the
    created file must equal setext title + TOC + accumulated body text."""
    # Create headers level 1 and 2.
    md_file = MdUtils(file_name="Test_file", title="Testing table of contents")
    list_headers = ["Header 1", "Header 1.1", "Header 2", "Header 2.2", "Header 2.3"]
    table_of_content_title = MdUtils(file_name='').new_header(level=1, title='Index', style='setext')
    md_file.new_header(level=1, title=list_headers[0])
    md_file.new_header(level=2, title=list_headers[1])
    md_file.new_header(level=1, title=list_headers[2])
    md_file.new_header(level=2, title=list_headers[3])
    md_file.new_header(level=2, title=list_headers[4])

    # Testing Depth 1
    table_of_contents_result = md_file.new_table_of_contents(table_title="Index", depth=1)
    # Depth 1 keeps only the level-1 headers (indexes 0 and 2); anchors are
    # the lowercase title with spaces turned into '-' and other chars stripped.
    table_of_content_expected = table_of_content_title \
                                + '\n* [' + list_headers[0] + '](#' \
                                + re.sub('[^a-z0-9_\-]', '', list_headers[0].lower().replace(' ', '-')) + ')' \
                                + '\n* [' + list_headers[2] + '](#' \
                                + re.sub('[^a-z0-9_\-]', '', list_headers[2].lower().replace(' ', '-')) + ')\n'

    self.assertEqual(table_of_contents_result, table_of_content_expected)

    # Testing created file
    md_file.create_md_file()
    data_file_result = MdUtils('').read_md_file('Test_file')
    data_file_expected = MdUtils('').new_header(1, "Testing table of contents", 'setext') \
                         + md_file.table_of_contents \
                         + md_file.file_data_text
    self.assertEqual(data_file_result, data_file_expected)
    os.remove('Test_file.md')

    # Testing Depth 2
    md_file = MdUtils(file_name="Test_file", title="Testing table of contents")
    list_headers = ["Header 1", "Header 1.1", "Header 2", "Header 2.2", "Header 2.3"]
    table_of_content_title = MdUtils(file_name='').new_header(level=1, title='Index', style='setext')
    md_file.new_header(level=1, title=list_headers[0])
    md_file.new_header(level=2, title=list_headers[1])
    md_file.new_header(level=1, title=list_headers[2])
    md_file.new_header(level=2, title=list_headers[3])
    md_file.new_header(level=2, title=list_headers[4])
    table_of_contents_result = md_file.new_table_of_contents(table_title="Index", depth=2)
    # Depth 2: level-1 headers (indexes 0 and 2) stay at the top level;
    # level-2 headers are indented with a tab.
    table_of_content_expected = table_of_content_title
    for x in range(len(list_headers)):
        if x in (0, 2):
            table_of_content_expected += '\n* [' + list_headers[x] + '](#' \
                                         + re.sub('[^a-z0-9_\-]', '', list_headers[x].lower().replace(' ', '-')) \
                                         + ')'
        else:
            table_of_content_expected += '\n\t* [' + list_headers[x] + '](#' \
                                         + re.sub('[^a-z0-9_\-]', '', list_headers[x].lower().replace(' ', '-')) \
                                         + ')'
    table_of_content_expected += '\n'
    self.assertEqual(table_of_contents_result, table_of_content_expected)
    md_file.create_md_file()
    data_file_result = MdUtils('').read_md_file('Test_file')
    data_file_expected = MdUtils('').new_header(1, "Testing table of contents", 'setext') \
                         + md_file.table_of_contents \
                         + md_file.file_data_text
    self.assertEqual(data_file_result, data_file_expected)
    os.remove('Test_file.md')
def main():
    """Convert a PowerPoint file into a markdown outline plus extracted images.

    Creates ``<name>_converted/`` containing ``<name>_converted.md`` and an
    ``<name>_images/`` folder holding every embedded PNG/JPEG/GIF blob.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('ppt_name', type=str,
                        help='add the name of the PowerPoint file(NOTE: the folder must be in the same directory as the prorgram file')
    args = parser.parse_args()

    pptx_name = args.ppt_name
    pptx_name_formatted = pptx_name.split('.')[0]
    prs = Presentation(pptx_name)

    path = '{}_converted'.format(pptx_name_formatted)
    if not os.path.exists(path):
        os.mkdir(path)
    images_folder = '{}_images'.format(pptx_name_formatted)
    images_path = os.path.join(path, images_folder)
    if not os.path.exists(images_path):
        os.mkdir(images_path)

    ppt_dict = {}  # Keys: slide numbers, values: slide content
    slide_count = 0
    picture_count = 0
    # Magic numbers: PNG (\x89), JPEG (\xff), GIF (\x47).
    image_signatures = (b'\x89', b'\xff', b'\x47')

    for slide in prs.slides:
        texts = []
        slide_count += 1

        # Export embedded images related to this slide.
        for part_key in list(slide._part.related_parts.keys()):
            image_part = slide._part.related_parts[part_key]
            # FIX: the original condition
            #   type(image_part) == pptx.parts.image.ImagePart or pptx.opc.package.Part
            # was always True — the second operand is a truthy class object.
            # Use isinstance with a tuple; the magic-byte check below remains
            # the effective image filter.
            if isinstance(image_part, (pptx.parts.image.ImagePart, pptx.opc.package.Part)):
                if image_part.blob[0:1] in image_signatures:
                    with open('{}/image{}_slide{}.png'.format(images_path, picture_count, slide_count), 'wb') as f:
                        f.write(image_part.blob)
                    picture_count += 1

        # Collect non-empty text; multi-line frames become one entry per line.
        for shape in slide.shapes:
            if not shape.has_text_frame:
                continue
            if '\n' in shape.text:
                texts.extend(word for word in shape.text.split('\n') if word != '')
            elif shape.text != '':
                texts.append(shape.text)

        ppt_dict[slide_count] = texts

    # Render the outline: one bullet per slide, text nested beneath.
    ppt_content = ''
    for slide_no, entries in ppt_dict.items():
        ppt_content += ' - Slide number {}\n'.format(slide_no)
        for entry in entries:
            ppt_content += '\t - {}\n'.format(entry)

    mdFile = MdUtils(file_name='{}/{}'.format(path, path))  # second argument isn't path, it just shares the path name.
    mdFile.write(ppt_content)
    mdFile.create_md_file()
def gen_api_markdown(target_api_class: Any, output_file_path: str, extra_head: str = ""):
    """Generate a markdown API reference page for *target_api_class*.

    For each public method a level-2 header, its docstring, and a 3-column
    parameter table (参数名称/类型/默认值) are emitted.

    :param target_api_class: class whose own methods are documented
    :param output_file_path: path of the markdown file to create
    :param extra_head: optional raw markdown prepended to the file
    """
    md_file = MdUtils(file_name=output_file_path)
    if extra_head:
        md_file.write(extra_head)
    md_file.new_header(level=1, title="")
    for method in get_own_methods(target_api_class):
        method_name = method.__name__
        md_file.new_header(level=2, title=method_name)
        docstring = method.__doc__
        if docstring:
            md_file.write(f"\n{method.__doc__}\n")
        parameter_table = ["参数名称", "类型", "默认值"]
        for parameter in inspect.signature(method).parameters.values():
            if parameter.name == "self":
                continue
            row = [parameter.name]
            if isinstance(parameter.annotation, str):
                row.append(parameter.annotation)
            else:
                if get_origin(parameter.annotation) is Union:
                    # group_msg = group_message — Union-annotated params are skipped.
                    continue
                row.append(parameter.annotation.__name__)
            # FIX: compare against the public sentinel with `is` (the original
            # used `== inspect._empty`), and show falsy defaults (0, False, "")
            # instead of rendering them as 无 ("none"). None still reads as 无.
            if parameter.default is inspect.Parameter.empty or parameter.default is None:
                row.append("无")
            else:
                row.append(str(parameter.default))
            parameter_table.extend(row)
        # Only emit a table when at least one parameter row was collected.
        if len(parameter_table) > 3:
            md_file.new_table(
                columns=3,
                rows=len(parameter_table) // 3,
                text=parameter_table,
                text_align="center",
            )
    md_file.create_md_file()
    print(f"成功生成{output_file_path}文件")
def test_create_md_file(self):
    """create_md_file must actually write Test_file.md to disk."""
    md_file = MdUtils("Test_file")
    md_file.create_md_file()
    created = Path('Test_file.md')
    if not created.is_file():
        self.fail()
    # Clean up the artifact so later tests start from a blank slate.
    os.remove('Test_file.md')
def write_summary_file(summary, savepath):
    """Write a nested markdown bullet list of links described by *summary*.

    :param summary: mapping of node id -> (depth, Path); the first path
        component is stripped before linking
    :param savepath: destination markdown file (without extension)
    """
    print(f"writing - {savepath}")
    md = MdUtils(file_name=savepath)
    for depth, full_path in summary.values():
        # Indent by depth; link text is the file name, target drops the root part.
        link_target = Path(*full_path.parts[1:])
        md.new_line(f"{' ' * depth}* [{full_path.name}]({link_target})")
    md.create_md_file()
def main(self):
    """Write sitelist.md containing a 4-column table of scraped sites."""
    site_list = MdUtils(file_name='sitelist', title='Scraper Site List')
    site_list.new_header(level=1, title='Sites')
    # loop_spiders returns a flat, row-major cell list; 4 cells per row.
    cells = self.loop_spiders()
    site_list.new_line()
    site_list.new_table(columns=4,
                        rows=int(len(cells) / 4),
                        text=cells,
                        text_align='center')
    site_list.create_md_file()
def create_readme(self):
    '''Saves a .md file with description of a maximal setup.

    TODO minimal readme just for subset making
    - n articles
    - regex patterns used for subsetting
    '''
    readme = MdUtils(file_name=os.path.join(self.work_dir_path, 'README'),
                     title='Subset readme')
    # produce content
    date_generated = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # SECTION 1: DATE GENERATED
    readme.new_paragraph()
    readme.new_header(level=1, title='Date', style='setext')
    readme.new_line(date_generated)
    # SECTION 2: FOLDER STRUCTURE SCPECIFICATION
    readme.new_paragraph()
    readme.new_header(level=1, title='File structure', style='setext')
    # top folder: workdir
    # The lines below draw the expected output tree verbatim inside a
    # fenced code block, one tree row per new_line call.
    readme.new_line('```')
    readme.new_line('{}/'.format(os.path.basename(self.work_dir_path)))
    readme.new_line('├── data_subset/ #raw data used for this analysis')
    readme.new_line('│ └── *.ndjson')
    readme.new_line('├── preprocessed/ #preprocessed data')
    readme.new_line('│ └── *.ndjson')
    readme.new_line('├── model_input/ #dataset used for topic modeling')
    readme.new_line('│ └── *.ndjson')
    readme.new_line('├── mdl_lda/ #outputs of LDA')
    readme.new_line('│ ├── model_states/ #serialized trained LDAs')
    readme.new_line('│ │ └── *.pkl')
    readme.new_line('│ ├── topic_overviews/ #top terms per topic')
    readme.new_line('│ │ └── *.txt')
    readme.new_line('│ ├── doc_top_mat/ #document-topic matrices')
    readme.new_line('│ │ └── *.ndjson')
    readme.new_line('│ ├── pyldavis/ #pyldavis plots')
    readme.new_line('│ │ └── *.html')
    readme.new_line(
        '│ └── model_comparison.png #elbow plot of n topics and coherence'
    )
    readme.new_line(
        '└── mdl_ntr/ #outputs of Novelty, Transience, Resonance')
    readme.new_line(' └── w{number}/ #results at window n')
    readme.new_line(' ├── *.csv')
    readme.new_line(' └── fig/ #plots of results at that window')
    readme.new_line(' └── *.png')
    readme.new_line('```')
    readme.new_line()
    readme.create_md_file()
def _write_tool_page(self):
    """Render docs/<tool-name>/README.md for this tool."""
    os.makedirs(self.tool_prefix, exist_ok=True)
    page = MdUtils(file_name=f'{self.tool_prefix}/README', title=self.tool_command)
    page_writer = Writer(page)
    page_writer.write_description(self.tool.__doc__)
    page_writer.write_command_usage(self)
    self._write_tool_command_arguments_and_options(page_writer)
    page_writer.write_actions_table(self.tool_serialized_actions)
    page_writer.write_action_groups_table(self.tool_serialized_action_groups)
    page.create_md_file()
def create_md_issue_report(org, repos, issue_state='all', start_time=None, token=None):
    """Write pdsen_issues.md summarizing issues grouped by type, per repo.

    :param org: GitHub organization name
    :param repos: optional collection of repo names to include (falsy = all)
    :param issue_state: issue state filter forwarded to the query
    :param start_time: only include issues from this time onward
    :param token: GitHub API token
    """
    gh = GithubConnection.getConnection(token=token)
    _md_file = MdUtils(file_name='pdsen_issues', title='PDS EN Issues')
    # Restrict to the requested subset when one was given.
    selected = (r for r in gh.repositories_by(org)
                if not repos or r.name in repos)
    for repo in selected:
        grouped = get_issues_groupby_type(repo, state=issue_state,
                                          start_time=start_time)
        convert_issues_to_planning_report(_md_file, repo.name, grouped)
    _md_file.create_md_file()
def write_league_md(league, my_ros_dudes, my_ros_ranks, unowned_ros_dudes, unowned_ros_ranks, def_advice, kick_advice, weekly_team, weekly_tiers, potential_stream_names, potential_stream_pos, potential_stream_tiers, starters):
    """Write a per-league markdown summary and return its base file name."""
    from mdutils.mdutils import MdUtils

    md_file = 'league_' + league['nickname'].replace(' ', '')
    mdFile = MdUtils(file_name=md_file, title=league['nickname'])

    def _value_table(players, values):
        # Flatten (player, value) pairs into mdutils' row-major cell list.
        cells = ["Player", "Value"]
        for player, value in zip(players, values):
            cells.extend([player, str(value)])
        mdFile.new_table(columns=2, rows=int(len(cells) / 2),
                         text=cells, text_align='center')

    mdFile.new_header(level=2, title='ROS Valuations', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Your Roster', add_table_of_contents='n')
    _value_table(my_ros_dudes, my_ros_ranks)
    mdFile.new_header(level=3, title='Available Free Agents', add_table_of_contents='n')
    _value_table(unowned_ros_dudes, unowned_ros_ranks)

    # Remaining sections are headers only (content filled elsewhere/by hand).
    mdFile.new_header(level=2, title='Weekly Tiers', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Suggested Starting Lineup', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Substitutions Necessary', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Skill Streams Available', add_table_of_contents='n')
    mdFile.new_header(level=2, title='Weekly Streaming Advice', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Defenses', add_table_of_contents='n')
    mdFile.new_header(level=3, title='Kickers', add_table_of_contents='n')
    mdFile.create_md_file()
    return md_file
def create_markdown(region):
    """
    Create markdown file which Kevin's figures are embedded.
    """
    # NOTE(review): relies on module-level globals `today`, `today_text`,
    # `regionNames`, and helpers `get_img_size` / `Html` — confirm they exist.
    region_short = regionNames[region]
    mdFile = MdUtils(file_name=today)
    # YAML-style front matter consumed by the static site generator.
    mdFile.new_line('---')
    mdFile.new_line(f'title: \"{today}\"')
    mdFile.new_line(f'date: "{today}"')
    mdFile.new_line('description: "Some amazing findings in here!"')
    mdFile.new_line(f'region: "{region}"')
    mdFile.write('\n---\n')
    mdFile.new_paragraph(
        '**Figure 1.** The non-residential mobility index is a measure of the average amount of time spent outside of home, based on smartphone mobility data (the index is scaled so that levels in the baseline period from Jan 3 to Feb 6, 2020 represent 100).'
    )
    # <img src="mobilityAlone_1yr.png" style="width:2400px;height:1200px"/>
    img_path = f"/figures/{region_short}/{today}/mobilityAlone_1yr.png"
    width1, height1 = get_img_size(img_path)
    mdFile.new_paragraph(
        Html.image(path=img_path, size=f'{width1}x{height1}', align='center'))
    mdFile.new_paragraph(
        f'**Figure 2.** Association between non-residential mobility and COVID-19 case growth across 7 Canadian provinces, March 15, 2020 to {today_text}.'
    )
    img_2_path = f"/figures/{region_short}/{today}/mobility_byMonth.png"
    width2, height2 = get_img_size(img_2_path)
    mdFile.new_paragraph(
        Html.image(path=img_2_path, size=f'{width2}x{height2}', align='center'))
    mdFile.new_paragraph(
        'Weekly COVID-19 growth rate (Y = cases in given week / cases in prior week) is strongly associated with the non-residential mobility in the prior 2-week period (X). The point where the regression line (black) crosses the line representing no COVID-19 case growth (red line) is the average Canadian mobility threshold. The Canadian mobility threshold is lower in spring, fall and winter of 2020, compared to the summer of 2020.',
        align="center")
    mdFile.new_paragraph(
        f'**Figure 3.** Left panel: Variation in mobility (circles) and the estimated mobility threshold (purple dash), for 5 Canadian provinces with the most cases, March 15, 2020 to {today_text}, 2021. Right panel: Association between mobility gap and COVID-19 growth rate.'
    )
    img_3_path = f"/figures/{region_short}/{today}/mobilityGap_both.png"
    width3, height3 = get_img_size(img_3_path)
    mdFile.new_paragraph(
        Html.image(path=img_3_path, size=f'{width3}x{height3}', align='center'))
    mdFile.new_paragraph(
        'The mobility threshold is the estimated level of mobility needed to control COVID-19 case growth. This threshold is highest in summer and in less populated provinces. When mobility decreased below the mobility threshold (blue dots), weekly COVID-19 cases counts decreased. In November 2020, Manitoba was the only province that implemented a lockdown that had successfully crossed the mobility threshold and has led to reductions in COVID-19 case growth. Other provinces waited until December 2020.',
        align="center")
    mdFile.create_md_file()
    # Move the generated report into the per-region web output tree.
    Path(f'{today}.md').rename(
        f'./web_output/reports/{region_short}/{today}.md')
    with open(f'./web_output/reports/{region_short}/{today}.md', "r") as f:
        lines = f.readlines()
    # Rewrite the file without its first four lines (mdutils preamble),
    # leaving the front matter at the top of the file.
    with open(f'./web_output/reports/{region_short}/{today}.md', "w") as f:
        for line in lines[4:]:
            f.write(line)
def generate_report():
    """Generate ./report.md describing per-letter features of ALPHABET glyphs.

    For every letter: renders the glyph, trims empty margins, thresholds it,
    computes weight / gravity-center / axial-moment features plus horizontal
    and vertical projection plots, and embeds everything into the report.
    """
    report = MdUtils(file_name=f'./report.md')
    report.new_header(level=1, title='Выделение признаков символов')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')
    report.new_line(text=f'Алфавит - {ALPHABET}')
    for letter in ALPHABET:
        # Render the letter into its own folder, trimmed of empty rows/cols.
        letter_folder_path = f'./{folder_helper.IMAGES_FOLDER_PATH}/letter_{letter}'
        os.makedirs(letter_folder_path, exist_ok=True)
        letter_image_path = f'{letter_folder_path}/{letter}.png'
        letter_image = Image.new(mode=constants.GRAYSCALE_MODE, size=(FONT_SIZE, FONT_SIZE), color=WHITE)
        result = ImageDraw.Draw(im=letter_image, mode=constants.GRAYSCALE_MODE)
        result.text(xy=(0, 0), text=letter, font=FONT, fill=0, anchor='lt')
        letter_image = cut_empty_rows_and_cols(letter_image)
        letter_image.save(letter_image_path)
        report.new_header(level=2, title=f'Буква {letter}')
        report.new_line(report.new_inline_image(text=letter, path=letter_image_path))
        # Binarize, then report scalar features of the thresholded glyph.
        thresholded = simple_threshold(letter_image, 100)
        report.new_line(text=f'Вес черного - {black_weight(thresholded)}')
        report.new_line(text=f'Удельный вес черного - {normalized_black_weight(thresholded)}')
        center = gravity_center(thresholded)
        report.new_line(text=f'Координаты центра масс - ({center[0]}, {center[1]})')
        normalized_center = normalized_gravity_center(thresholded)
        report.new_line(text=f'Нормированные координаты центра масс - ({normalized_center[0]}, {normalized_center[1]})')
        report.new_line(text=f'Центральный горизонтальный осевой момент - {central_horizontal_axial_moment(thresholded)}')
        report.new_line(text=f'Центральный вертикальный осевой момент - {central_vertical_axial_moment(thresholded)}')
        report.new_line(text=f'Нормированный центральный горизонтальный осевой момент -'
                             f'{normalized_central_horizontal_axial_moment(thresholded)}')
        report.new_line(text=f'Нормированный центральный вертикальный осевой момент -'
                             f'{normalized_central_vertical_axial_moment(thresholded)}')
        # Horizontal projection plot (closed after saving to free the figure).
        h_levels, h_projections = horizontal_projection(thresholded)
        pyplot.plot(h_levels, h_projections)
        pyplot.title(f'Horizontal projection {letter}')
        path = f'{letter_folder_path}/horizontal_projection_{letter}.png'
        pyplot.savefig(path)
        pyplot.close()
        report.new_line(report.new_inline_image(text=letter, path=path))
        # Vertical projection plot.
        v_levels, v_projections = vertical_projection(thresholded)
        pyplot.plot(v_levels, v_projections)
        pyplot.title(f'Vertical projection {letter}')
        path = f'{letter_folder_path}/vertical_projection_{letter}.png'
        pyplot.savefig(path)
        pyplot.close()
        report.new_line(report.new_inline_image(text=letter, path=path))
        report.new_line()
    report.create_md_file()
def test_new_header(self):
    """Atx headers for levels 1..6 are generated and written verbatim."""
    file_name = 'Test_file'
    md_file = MdUtils(file_name)
    string_headers_expected = "\n# Header 0\n\n## Header 1\n\n### Header 2\n\n#### Header 3\n\n" \
                              "##### Header 4\n\n###### Header 5\n"
    # new_header both returns the rendered header and appends it to the file.
    string_headers = "".join(
        md_file.new_header(level=index + 1, title='Header ' + str(index), style='atx')
        for index in range(6))
    self.assertEqual(string_headers, string_headers_expected)
    md_file.create_md_file()
    file_result = md_file.read_md_file(file_name)
    # The file carries three leading newlines from the (empty) title block.
    self.assertEqual(file_result, '\n\n\n' + string_headers_expected)
def generateReadme():
    """Create LOCAL_README.md from the resources collection and return its text."""
    mdFile = MdUtils(file_name="LOCAL_README.md")
    mdFile.new_header(level=1, title="Compilation Of DSC-RAIT resources")
    mdFile.new_paragraph(
        "This is a ``README.md`` file generated by asmrPy to test it's capabilities. Stay tuned for more updates!"
    )
    # One level-2 section per domain document; each link gets its own paragraph.
    for domain_doc in db.resources.find():
        mdFile.new_header(level=2, title=domain_doc['domain'])
        for link_doc in domain_doc['links']:
            mdFile.new_paragraph(text=f"{link_doc['info']}: {link_doc['link']}")
    mdFile.create_md_file()
    return mdFile.read_md_file(file_name="LOCAL_README.md")
def _write_action_group_page(self, action_group: ActionGroup):
    """Render the markdown page for *action_group*, then its actions' pages."""
    group_path = f'{self.tool_prefix}/{action_group.name}'
    page = MdUtils(file_name=group_path, title=action_group.name)
    group_writer = Writer(page)
    group_writer.write_description(action_group.description)
    group_writer.write_command_usage(self, action_group=action_group)
    self._write_tool_command_arguments_and_options(group_writer)
    group_writer.write_actions_table(action_group.actions, action_group=action_group)
    page.create_md_file()

    # Every action in the group gets its own page under the group directory.
    os.makedirs(group_path, exist_ok=True)
    for action in action_group.actions:
        self._write_action_page(action, action_group=action_group)