Beispiel #1
0
def episodeToMarkdown(data, podInfo, auth):
    """Take in podcast episode data from PodcastIndex and return Markdown.

    Args:
        data: episode dict from the PodcastIndex API; reads 'id', 'title',
            'enclosureType', 'enclosureUrl', 'link', 'datePublished',
            'description' and optionally 'image' / 'feedImage'.
        podInfo: podcast feed dict; reads podInfo['feed']['title'] and
            podInfo['feed']['url'].
        auth: string substituted for the placeholder token read from
            'postfooter.md'.

    Returns:
        (MdUtils document, generated file name) — the caller writes the file.
    """
    fileName = str(data['id']) + '.md'
    # RFC-2822-style UTC timestamp for the episode's publish date.
    published = time.strftime("%a, %d %b %Y %H:%M:%S +0000",
                              time.gmtime(data['datePublished']))
    mf = MdUtils(file_name=fileName, title=data['title'])
    showTxt = data['title'] + ' - ' + data['enclosureType']
    mf.new_line(mf.new_inline_link(link=data['enclosureUrl'], text=showTxt))
    mf.new_line(mf.new_inline_link(link=data['link'], text='Show Notes Link'))
    mf.new_line(f'Published: {published}')
    if 'image' in data:
        mf.new_paragraph(Html.image(path=data['image']))
    # BUG FIX: was `elif 'feedImage':` — a non-empty string literal is always
    # truthy, so the fallback ran (and KeyError'd) whenever 'image' was absent.
    elif 'feedImage' in data:
        mf.new_paragraph(Html.image(path=data['feedImage']))
    showTitle = "Podcast: " + podInfo['feed']['title']
    mf.new_header(1, showTitle)
    feedUrl = podInfo['feed']['url']
    podLink = f'[Original RSS Feed]({feedUrl})'
    mf.new_paragraph(podLink)
    mf.new_paragraph(data['description'])
    mf.new_paragraph('PodcastIndex.org ID: ' + str(data['id']))
    # Append the shared footer template, substituting the auth token in.
    mf.read_md_file('postfooter.md')
    mf.file_data_text = mf.file_data_text.replace('somethingtoreplacehere777',
                                                  auth)
    print(f'Working on {showTxt}')
    return mf, fileName
Beispiel #2
0
def _markdown(checklist):
    """
    Generate markdown for checklist.

    Reads the checklist JSON file at *checklist* and writes
    'Ikigai-Checklist.md': one level-1 header per section (ordered by its
    numeric 'no'), one level-2 header per subject, and a four-column action
    table per subject.
    """
    # BUG FIX: the handle from open() was never closed; use a context
    # manager so the file is released deterministically.
    with open(checklist) as fp:
        checklist = json.load(fp, object_pairs_hook=OrderedDict)

    mdFile = MdUtils(file_name='Ikigai-Checklist', title='PDP 2019 Checklist')

    mdFile.new_paragraph(checklist['overview'])

    # Sections are stored in a dict; order them by their numeric 'no' field.
    sections = sorted(checklist['checklist'].values(),
                      key=lambda s: int(s['no']))
    for section in sections:
        mdFile.new_header(level=1, title=section['title'])
        for subject in section['subjects'].values():
            mdFile.new_header(level=2, title=subject['title'])
            mdFile.new_paragraph(subject['description'])
            mdFile.new_paragraph("Reference: " + subject['provisions'])
            mdFile.new_paragraph("Functions: " +
                                 ", ".join(subject['functions']))
            mdFile.new_paragraph("Groups: " + ", ".join(subject['groups']))

            # Header row plus one row per action; mdutils wants a flat cell
            # list together with the row count.
            actions = [['No', 'Description', 'Tags', 'Check']]
            actions += [[a['no'], a['description'], ", ".join(a['tags']), '']
                        for a in subject['actions'].values()]
            rows = len(actions)
            actions = flatten(actions)
            mdFile.new_table(columns=4,
                             rows=rows,
                             text=actions,
                             text_align='left')

    mdFile.create_md_file()
Beispiel #3
0
class generateMarkdown():
    """Build CHANGELOG.md for one release from a repo's commits/PR data."""

    def __repr__(self):
        return f"<generateMarkdown(mdFile={self.mdFile})>"

    def __init__(self, repo):
        # Header: release tag plus a compare link against the previous tag.
        self.mdFile = MdUtils(file_name='CHANGELOG', title='CHANGELOG')
        self.mdFile.new_line(
            "## **Release** "
            f"[{repo.end}](https://github.com/nanocurrency/nano-node/tree/{repo.end})")
        self.mdFile.new_line(
            "[Full Changelog](https://github.com/nanocurrency"
            f"/nano-node/compare/{repo.start}...{repo.end})")
        grouped = self.pull_to_section(repo.commits)

        # One table per non-empty section, breaking changes first.
        for section, prs in grouped.items():
            self.write_header(section)
            for pr in prs:
                self.write_PR(pr, repo.commits[pr[0]])
        self.mdFile.create_md_file()

    def write_header(self, section):
        """Start a section: rule, level-3 title, two-column table header."""
        self.mdFile.new_line("---")
        self.mdFile.new_header(level=3, title=section)
        self.mdFile.new_line("|Pull Request|Title")
        self.mdFile.new_line("|:-:|:--")

    def write_PR(self, pr, info):
        """Append one PR row; pr = [number, is_breaking]."""
        prefix = "**BREAKING** " if pr[1] else ""
        self.mdFile.new_line(
            f"|[#{pr[0]}]({info['Url']})|{prefix}{info['Title']}")

    def handle_labels(self, labels):
        """Return (section name, is_breaking) for a PR's label list.

        The first SECTIONS entry containing any of the labels wins;
        unmatched PRs fall into 'Other' (never marked breaking).
        """
        breaking = any(marker in labels for marker in ['breaking'])
        for section, known in SECTIONS.items():
            if any(label in known for label in labels):
                return section, breaking
        return 'Other', False

    def pull_to_section(self, commits):
        """Group PR numbers by section; breaking PRs go to the front."""
        buckets = {name: [] for name in SECTIONS}
        for pull, info in commits.items():
            section, important = self.handle_labels(info['labels'])
            if important:
                buckets[section].insert(0, [pull, important])
            else:
                buckets[section].append([pull, important])
        # Drop empty sections, preserving SECTIONS order.
        return {name: prs for name, prs in buckets.items() if prs}
def generate_report():
    """Build report.md for the text-segmentation lab.

    Renders the phrase onto a white canvas, derives projection and
    symbol-segment images, then embeds every intermediate image in the
    markdown report.
    """
    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Сегментация текста')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')
    report.new_line(text=f'Алфавит - {ALPHABET}')
    report.new_line(text=f'Исходная фраза - {PHRASE}')

    images_dir = folder_helper.IMAGES_FOLDER_PATH
    phrase_image_path = f'{images_dir}/phrase.png'
    phrase_projections_image_path = f'{images_dir}/phrase_projections.png'
    phrase_segments_image_path = f'{images_dir}/phrase_segments.png'
    phrase_result_image_path = f'{images_dir}/phrase_result.png'

    # Draw the phrase on a fresh white canvas, then trim the empty margins.
    canvas = Image.new(mode=GRAYSCALE_MODE,
                       size=(1200, FONT_SIZE),
                       color=WHITE)
    pen = ImageDraw.Draw(im=canvas, mode=GRAYSCALE_MODE)
    pen.text(xy=(0, 0), text=PHRASE, font=FONT, fill=0, anchor='lt')
    canvas = cut_empty_rows_and_cols(canvas)
    canvas.save(phrase_image_path)

    projections = draw.draw_projections(canvas)
    projections.save(phrase_projections_image_path)

    segments = draw.draw_symbol_segments(canvas, SYMBOLS_DIFF_THRESHOLD)
    segments.save(phrase_segments_image_path)

    # Overlay the segment image onto the projection image.
    combined = projections.copy()
    combined.paste(im=segments, box=(30, 0))
    combined.save(phrase_result_image_path)

    # One level-2 section per produced image.
    for caption, path in (
            ('Картинка с фразой', phrase_image_path),
            ('Картинка с профилями', phrase_projections_image_path),
            ('Картинка с сегментами символов', phrase_segments_image_path),
            ('Картинка с результатом', phrase_result_image_path)):
        report.new_header(level=2, title=caption)
        report.new_line(report.new_inline_image(text=caption, path=path))

    report.create_md_file()
Beispiel #5
0
def create_markdown(github, owner):
    """Create markdown previewing badges for repos in the github dict.

    Writes '<filename>.md' with one linked level-1 header per repository
    followed by that repository's badge images.
    """
    filename = 'prepbadge'
    # BUG FIX: the progress message contained a literal placeholder instead
    # of the actual output file name.
    print(f'Creating markdown file for {owner} repos in {filename}.md')
    md = MdUtils(file_name=filename, title='EdgeXFoundry Repo Badges Preview')
    for repo in github[0]['result']:
        md.new_header(level=1, title=md.new_inline_link(link=repo['github_url'], text=repo['name']))
        for badge in repo['badges']:
            md.write(f'{badge} ')
        md.new_line('')
    md.create_md_file()
    def write_to_markdown(self, filename: str):
        """Write all collected translations to OUTPUT_DIR/filename as Markdown.

        One level-1 header per plugin, each followed by its command table.
        """
        md_doc = MdUtils(
            file_name=self.OUTPUT_DIR + filename,
            title='Mycroft.ai commands (autogenerated from po files)')

        for plugin_name, plugin_translations in self.translations.items():
            md_doc.new_header(level=1, title=plugin_name)
            md_doc.new_line()
            self.add_table(md_doc, plugin_translations)

        md_doc.create_md_file()
def generate_report():
    """Build report.md for the texture-analysis lab.

    For each source image: save its series-length-matrix visualisation,
    compute the SRE coefficient, and embed both the original image and the
    matrix image in the report.
    """
    report = MdUtils(file_name=f'./report.md')
    report.new_header(level=1, title='Текстурный анализ')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')

    for im in IMAGES:
        image_path = f'{helpers.folder_helper.IMAGES_FOLDER_PATH}/{im}'

        processed_image_folder_path = f'{image_path}_processed'
        os.makedirs(processed_image_folder_path, exist_ok=True)

        series_length_matrix_image_path = f'{processed_image_folder_path}/{im}_series_length_matrix.png'

        image = Image.open(image_path).convert(constants.constants.RGB_MODE)
        grayscaled = grayscale.grayscale.mean_grayscale(image)

        # Series length matrix.
        # FIX: reuse the already-computed grayscale image — the original
        # called mean_grayscale(image) a second time here for no reason.
        draw.draw.draw_series_length_matrix(grayscaled).save(
            series_length_matrix_image_path)

        # SRE (short-run emphasis) coefficient of the grayscale image.
        sre = texturing.texturing.sre_coefficient(grayscaled)

        # Report section for this image.
        report.new_header(level=2, title=f'{im} (SRE = {sre})')

        report.new_header(level=3, title='Исходная картинка')
        report.new_line(report.new_inline_image(text='Исходная картинка', path=image_path))

        report.new_header(level=3, title='Матрица длин серий')
        report.new_line(report.new_inline_image(text='Матрица длин серий', path=series_length_matrix_image_path))

    report.create_md_file()
Beispiel #8
0
def write_designators(parts, keys, md: mdutils.MdUtils, sort_by='designator'):
    """Append one table per *sort_by* group of *parts* to *md*.

    Parts are grouped on the *sort_by* field; each group gets a level-2
    header and a table whose header row is *keys*, with the group's parts
    ordered by their 'id' field.
    """
    ordered = sorted(parts, key=lambda part: part[sort_by])
    for group_key, members in groupby(ordered, lambda part: part[sort_by]):
        md.new_line()
        md.new_header(level=2, title=group_key)
        group_rows = sorted(members, key=lambda part: part['id'])
        # Flat cell list: header row first, then one row per part.
        cells = list(keys)
        for part in group_rows:
            cells.extend(part.get(key) for key in keys)
        md.new_line()
        md.new_table(columns=len(keys),
                     rows=len(group_rows) + 1,
                     text=cells)
Beispiel #9
0
    def write_requirements(self, root_dir='.', md_file_name=None, format='md'):
        """Write a REQUIREMENTS summary for the current tag.

        Args:
            root_dir: base directory for the generated file.
            md_file_name: explicit output path; when falsy it is derived
                from the current tag.
            format: 'md' or 'html'; anything else is logged as an error.

        Returns:
            Path of the generated file ('md'), the converted file ('html'),
            or '' for an unsupported format.

        Raises:
            NoAppropriateVersionFoundException: no file name given and no
                current tag to derive one from.
        """
        if not md_file_name:
            if self._current_tag:
                md_file_name = os.path.join(root_dir, self._current_tag, 'REQUIREMENTS.md')
            else:
                dev_or_stable = "dev" if self._dev else "stable"
                # BUG FIX: message previously read e.g. "for devrelease" (missing space).
                raise NoAppropriateVersionFoundException("No suitable version for " + dev_or_stable + " release")

        os.makedirs(os.path.dirname(md_file_name), exist_ok=True)
        requirements_md = MdUtils(file_name=md_file_name, title="Requirements Summary")

        for req_topic in self._requirements:
            requirements_md.new_header(level=1, title=req_topic)
            for req in self._requirements[req_topic]:
                # A requirement is "impacted" when the current tag appears in
                # its tag-map entry.
                impacted = self._current_tag in self._requirements_tag_map[req['number']]['tags'] if req['number'] in self._requirements_tag_map else False
                impacted_icon = ':boom:' if impacted else ''
                title = f"{req['title']} ([#{req['number']}](https://github.com/{self._repo}/issues/{req['number']})) {impacted_icon}"
                requirements_md.new_header(level=2, title=title)
                if impacted:
                    # Group impacting issues by type (bug / enhancement / ...).
                    issue_lines = {t: [] for t in Requirements.ISSUE_TYPES}
                    for n in self._requirements_tag_map[req['number']]['issues']:
                        issue = self._repo.issue(n)
                        bug_or_enhancement = Requirements._issue_is_bug_or_enhancement(issue)
                        issue_lines[bug_or_enhancement].append(f'{issue.title} ([#{n}](https://github.com/{self._repo}/issues/{n}))')

                    for issue_type, issue_list in issue_lines.items():
                        if issue_list:
                            requirements_md.new_paragraph(f'The {issue_type}s which impact this requirements are:')
                            requirements_md.new_list(issue_list)
                else:
                    requirements_md.new_paragraph('This requirement is not impacted by the current version')

        requirements_md.create_md_file()

        # BUG FIX: this cleanup call used to sit after the return statements
        # below, making it unreachable dead code; run it before returning.
        self._clean_previous_dev_requirements(root_dir)

        if format == 'md':
            return md_file_name
        if format == 'html':
            html_file_name = md_file_name.replace('.md', '.html')
            return md_to_html(md_file_name, html_file_name,
                              {'name': self._repo, 'description': self._repo.description, 'tag': self._current_tag})
        logger.error(f'output format {format} is not supported')
        return ''
def generate_report():
    """Build report.md for the filtering & morphology lab.

    For each source image: save the spatially smoothed version and the
    smoothed-minus-original difference, then embed all three images in the
    report.
    """
    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Фильтры и морфология')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')

    for im in IMAGES:
        image_path = f'{folder_helper.IMAGES_FOLDER_PATH}/{im}'
        processed_dir = f'{image_path}_processed'
        os.makedirs(processed_dir, exist_ok=True)

        source = Image.open(image_path).convert(constants.RGB_MODE)

        # Smoothing outputs.
        smoothed_path = f'{processed_dir}/{im}_spatial_smoothed.png'
        difference_path = f'{processed_dir}/{im}_spatial_smoothed_difference.png'

        gray = grayscale.mean_grayscale(source)
        filtration.spatial_smoothing(gray).save(smoothed_path)
        filtration.spatial_smoothing_difference(gray).save(difference_path)

        # One level-2 section per image, one level-3 entry per derivative.
        report.new_header(level=2, title=f'{im}')
        for caption, path in (
                ('Исходная картинка', image_path),
                ('Пространственное сглаживание', smoothed_path),
                ('Разница сглаженной и исходной', difference_path)):
            report.new_header(level=3, title=caption)
            report.new_line(
                report.new_inline_image(text=caption, path=path))

    report.create_md_file()
Beispiel #11
0
def report(fails: Dict[str, List[Fail]]):
    """Write an OCR recognition report ('report.md') summarising *fails*.

    *fails* maps a failure class (e.g. LenFail) to the list of its
    instances; LenFail entries are rendered as one table, every other
    class renders itself via to_md().
    """
    print('Start writing report')
    started = time.time()
    doc = MdUtils(file_name='report', title='OCR Recognition Report')

    for fail_kind, failures in fails.items():
        doc.new_header(level=2,
                       title=fail_kind.title,
                       add_table_of_contents='n')
        doc.new_header(level=3,
                       title="Explanation",
                       add_table_of_contents='n')
        doc.new_paragraph(fail_kind.explanation)
        doc.new_line(
            f'There were in total {len(failures)} of {fail_kind.title}')
        doc.new_header(level=3, title="Fails", add_table_of_contents='n')
        doc.new_line()

        if fail_kind == LenFail:
            # Length failures are summarised in a single 4-column table.
            cells: List[str] = [
                'Font name - size', 'Words in PDF', 'Recognized words',
                'Levenshtein distance of total text'
            ]
            for failure in failures:
                cells.extend([
                    f'{failure.font_name} - {failure.font_size}',
                    str(failure.pdf_len),
                    str(failure.ocr_len),
                    str(failure.total_levenshtein)
                ])
            doc.new_table(columns=4,
                          rows=len(cells) // 4,
                          text=cells,
                          text_align='center')
        else:
            for failure in failures:
                doc = failure.to_md(doc)

    doc.create_md_file()
    print(f'Finished reporting after {time.time() - started} sec')
Beispiel #12
0
    def run(self):
        """Generate a Markdown report for a TheHive case and attach it.

        Pulls the case metadata, sorted task logs and observables through
        the API, renders them with MdUtils into a temporary .md file under
        self.tmpPath, uploads that file as a task log on a newly created
        case task, deletes the temporary file, and reports success.
        """
        Responder.run(self)

        caseNumber = self.get_param('data.caseId')  # Friendly case number
        caseId = self.get_param('data.id')  # Raw case number
        case_observables = self.api.get_case_observables(caseId).json()
        title = self.get_param('data.title', None, 'title is missing')
        # NOTE(review): description/tags are fetched (which validates their
        # presence) but not referenced again below; data['description'] and
        # data['tlp'] are read from the full 'data' dict instead.
        description = self.get_param('data.description', None,
                                     'description is missing')
        tags = self.get_param('data.tags')
        data = self.get_param('data')
        tlp = self.getTLP(data['tlp'])

        # Title — file is written under tmpPath so it can be cleaned up later.
        #mdFile = MdUtils(file_name=str(caseNumber),title=tlp[0] + ' Case #' + str(caseNumber) + ': ' + title)
        mdFile = MdUtils(file_name=str(self.tmpPath) + str(caseNumber),
                         title=tlp[0] + ' Case #' + str(caseNumber) + ': ' +
                         title)

        # Case Summary — two-column table; caseSummary is a flat cell list.
        caseSummary = self.getCaseSummary(data)
        mdFile.new_header(level=1, title='Case Summary')
        mdFile.new_line(str(tlp[1]))

        mdFile.new_table(columns=2,
                         rows=int(caseSummary.__len__() / 2),
                         text=caseSummary,
                         text_align='left')

        # Case Description — raw HTML forces a page break when rendered to PDF.
        mdFile.new_line('<div style="page-break-after: always;"></div>')
        mdFile.new_line(' ')
        mdFile.new_header(level=1, title='Case Description')
        mdFile.new_line(str(data['description']))
        mdFile.new_line(' ')

        # Task Log — tasks ordered by creation time.
        allTaskIds = self.getCaseTasks(caseId)
        allTaskIds_sorted = sorted(allTaskIds.items(),
                                   key=lambda x: x[1]['createdAt'])
        mdFile.new_header(level=1, title='Task Log Entries')

        for task in allTaskIds_sorted:
            # NOTE(review): ' \: ' contains an invalid escape sequence;
            # Python keeps the backslash (a markdown escape for ':') but
            # emits a SyntaxWarning on 3.12+ — consider ' \\: '.
            title = str(task[1]['taskGroup'] + ' \: ' + task[1]['taskTitle'])
            createdAt = time.strftime(
                '%Y-%m-%dT%H:%M:%SZ',
                time.localtime(
                    task[1]['createdAt'] /
                    1000))  # Convert epoch ms to sec then human readable
            mdFile.new_header(level=2, title=title)
            mdFile.new_line(str('**Created At:** ') + str(createdAt))
            mdFile.new_line(
                str('**Created By:** ') + str(task[1]['createdBy']))
            mdFile.new_line(str('**Assigned To:** ') + str(task[1]['owner']))
            mdFile.new_line(str('**Case Status:** ') + str(task[1]['status']))
            mdFile.new_line(' ')
            mdFile.new_line(str('**Description:** '))
            mdFile.new_line(str(task[1]['description']))
            mdFile.new_line(' ')

            # Log entries within a task, also ordered by creation time.
            caseTaskLog = self.getCaseTaskLog(task[0])
            caseTaskLogEntries = (json.loads(caseTaskLog))
            caseTaskLogEntries_sorted = sorted(caseTaskLogEntries,
                                               key=lambda k: k['createdAt'])

            for caseTaskLogEntry in caseTaskLogEntries_sorted:

                createdAt = time.strftime(
                    '%Y-%m-%dT%H:%M:%SZ',
                    time.localtime(
                        caseTaskLogEntry['createdAt'] /
                        1000))  # Convert epoch ms to sec then human readable
                mdFile.new_line(
                    str(createdAt) + ' : ' + str(caseTaskLogEntry['message']))

        # Case Observables — six-column table from a flat cell list.
        mdFile.new_header(level=1, title='Case Observables')
        caseObservables = self.getCaseObservables(case_observables)
        mdFile.new_table(columns=6,
                         rows=int(caseObservables.__len__() / 6),
                         text=caseObservables,
                         text_align='left')

        # TLP Protocol description
        mdFile.new_line('<div style="page-break-after: always;"></div>')
        mdFile.new_line(' ')
        mdFile.new_header(
            level=1,
            title='Traffic Light Protocol (TLP) Definitions and Usage')
        tlpFooter = self.getTlpFooter()
        mdFile.new_table(columns=3, rows=5, text=tlpFooter, text_align='left')

        # Build TOC
        mdFile.new_table_of_contents(table_title='Table of Contents', depth=2)

        # Compile the report
        mdFile.create_md_file()

        # Add the report to the case
        addTask = json.loads(self.addTask(caseId))
        taskId = addTask['_id']

        # Add the MD file to the task (MdUtils appended the '.md' suffix)
        addTaskLog = json.loads(
            self.addTaskLog(taskId,
                            str(self.tmpPath) + str(caseNumber) + '.md'))

        # Cleanup the MD file
        os.remove(str(self.tmpPath) + str(caseNumber) + '.md')

        self.report({'report': 'created'})
Beispiel #13
0
# Shared header row for every ring table.
_RING_TABLE_HEADER = [
    "<sub>Resource</sub>", "<sub>Description</sub>", "<sub>Type</sub>",
    "<sub>Status</sub>"
]


def _ring_table_rows(data, keys, linked):
    """Build the flat cell list (header row included) for one ring table.

    With linked=True the resource name is rendered as a markdown link to
    base_url/<url>; HOLD/REJECT sections historically use plain names.
    """
    cells = list(_RING_TABLE_HEADER)
    for key in keys:
        entry = data[key]
        resourceName = key
        resourceDesc = entry.get("description", "")
        # BUG FIX: the default must be a dict — calling .get("type") on the
        # old "" default raised AttributeError whenever the
        # "architecture review" key was missing.
        resourceType = entry.get("architecture review", {}).get("type", "")
        resourceUrl = entry.get("url", "")
        resourceStatus = entry.get("status", "")
        if linked:
            resourceName = "[" + resourceName + "](" + base_url + '/' + resourceUrl + ")"
        cells.extend([
            '<sub>' + resourceName + '</sub>',
            '<sub>' + resourceDesc + '</sub>',
            '<sub>' + resourceType + '</sub>',
            '<sub>' + resourceStatus + '</sub>'
        ])
    return cells


def _write_ring_section(mdf, data, keys, linked):
    """Emit the table for one ring, or a red notice when the ring is empty."""
    mdf.new_line("")
    if not keys:
        mdf.new_line("There are currently no resources at this ring level.",
                     bold_italics_code='bi',
                     color='red')
    else:
        mdf.new_table(columns=4,
                      rows=len(keys) + 1,
                      text=_ring_table_rows(data, keys, linked))


def makeMarkdown(data, path):
    """Create the tech-radar README from resource *data* under *path*.

    Resources are bucketed by their 'status' ring (ADOPT/TRIAL/ASSESS/
    HOLD/REJECT); each ring gets an explanatory paragraph and a table.
    The five previously copy-pasted ring sections now share the
    _write_ring_section/_ring_table_rows helpers.
    """
    global base_url

    mdf = MdUtils(file_name=path + 'README', title='RBA TechRadar for Azure')

    # Partition resource keys by ring status; unknown statuses are ignored.
    rings = {status: [] for status in
             ("ADOPT", "TRIAL", "ASSESS", "HOLD", "REJECT")}
    for key in data:
        status = data[key].get("status")
        if status in rings:
            rings[status].append(key)

    mdf.new_header(level=1, title='Overview')

    mdf.new_header(level=2, title='What is the purpose?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is a tool to inspire and "
        "support engineering teams at Risk & Business Analytics to pick the best "
        "technologies for new projects; it provides a platform to share knowledge "
        "and experience in technologies, to reflect on technology decisions and "
        "continuously evolve our technology landscape.  Based on the pioneering "
        "work at Thought Works, our radar sets out the changes in technologies "
        "that are interesting in cloud development - changes that we think our "
        "engineering teams should pay attention to and consider using in their "
        "projects.")

    mdf.new_header(level=2, title='How do we maintain it?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is maintained by the Cloud "
        "Center of Excellence - an open group of senior RBA technologists committed "
        "to devote time to this purpose.  The CCoE self organizes to maintain these "
        "documents, including this version.  Assignment of technologies to rings is "
        "the outcome of status change proposals, which are discussed and voted on "
        "in CCoE meetings.  The radar depends on active participation and input from "
        "all engineering teams at RBA.")

    mdf.new_header(level=2, title='What are the current ring assignments?')
    mdf.new_paragraph(
        "The RBA TechRadar for Azure is a list of technologies, "
        "complemented by an assesment result, called ring assignment.  We use five "
        "rings with the following semantics:")

    # Adopt
    mdf.new_header(level=3, title='Adopt')
    mdf.new_paragraph(
        "Technologies we have high confidence in to serve our "
        "purpose, also at large scale.  Technologies with a usage culture in the "
        "RBA production environment, low risk, automated policy enforcement and "
        "are recommended to be widely used.")
    _write_ring_section(mdf, data, rings["ADOPT"], linked=True)

    # Trial
    mdf.new_header(level=3, title='Trial')
    mdf.new_paragraph(
        "Technologies that we have seen work with success in projects "
        "to solve real problems;  first serious usage experience that confirm benefits "
        "and uncover limitations.  TRIAL technologies are slightly more risky; some "
        "engineers in our organization walked this path and will share knowledge and "
        "experiences.  This area can contain services that have been architecture and "
        "security reviewed but do not contain automated policy managmeent.")
    _write_ring_section(mdf, data, rings["TRIAL"], linked=True)

    # Assess
    mdf.new_header(level=3, title='Assess')
    mdf.new_paragraph(
        "Technologies that are promising and have clear potential "
        "value-add for us; technologies worth investing some research and "
        "prototyping efforts to see if it has impact.  ASSESS technologies have "
        "higher risks;  they are often new to our organization and highly unproven "
        "within RBA.  You will find some engineers that have knowledge in the "
        "technology and promote it, you may even find teams that have started "
        "a prototyping effort.  These technologies can also include services that "
        "are currently in architecture or security review.")
    _write_ring_section(mdf, data, rings["ASSESS"], linked=True)

    # Hold — resource names deliberately rendered without links.
    mdf.new_header(level=3, title='Hold')
    mdf.new_paragraph(
        "Technologies not recommended to be used for new projects. "
        "Technologies that we think are not (yet) worth to (further) invest in.  "
        "HOLD technologies should not be used for new projects, but usually can be "
        "continued for existing projects.  These technologies may include services "
        "that have yet to be evaluated by architecture and security due to a lack "
        "of interest, time, or need.")
    _write_ring_section(mdf, data, rings["HOLD"], linked=False)

    # Reject — resource names deliberately rendered without links.
    mdf.new_header(level=3, title='Reject')
    mdf.new_paragraph(
        "Technologies not recommended to be used for any projects. "
        "Technologies that have undergone architecture and security review but do "
        "not meet company standards for use.  REJECT technologies should never be "
        "used on any project and should be considered deprecated for existing "
        "projects.")
    _write_ring_section(mdf, data, rings["REJECT"], linked=False)

    mdf.create_md_file()
    # Somehow the MDUtils outputs 4 empty lines prior to the actual table in
    # the target file.
    # BUG FIX: `file_name` was undefined here; the generated file is
    # path + 'README.md' (MdUtils appends the '.md' suffix).
    delete_four_head_lines(path + 'README.md')


# Output the overview table for the top-level README file
overview_file_name = 'results_overview.md'
mdfile = MdUtils(file_name=overview_file_name)

# Compose a mini-TOC linking to each sample-type section below.
mdfile.new_line("Quick jump to sample type:")
mdfile.new_line("==========================")
for sample_type in sota_checkpoints_eval:
    header_name = SAMPLE_TYPE_TO_SAMPLE_DISPLAY_NAME_DICT[sample_type]
    mdfile.new_line("[{}]({})\n".format(header_name, header_name_to_link(header_name)))
mdfile.new_line()

# One level-4 header plus results table per sample type.
for sample_type in sota_checkpoints_eval:
    mdfile.new_header(level=4, title=SAMPLE_TYPE_TO_SAMPLE_DISPLAY_NAME_DICT[sample_type])

    table_rows = get_results_table_rows(sota_checkpoints_eval[sample_type],
                                        measured_metrics,
                                        table_format='overview')
    write_table_to_md_file(mdfile, table_rows)
mdfile.create_md_file()
delete_four_head_lines(overview_file_name)

if args.output is not None:
    update_target_metrics(sota_checkpoints_eval, measured_metrics)
    # BUG FIX: was `open(output, "w")` — `output` is undefined; the CLI
    # value lives on args.output.
    with open(args.output, "w") as write_file:
        json.dump(sota_checkpoints_eval, write_file, indent=8)
Beispiel #15
0
    repo = node["issue"]["repository"]["nameWithOwner"]
    cur_datetime = str2time(node["issue"]["createdAt"])
    start_datetime = str2time(start_time)
    end_datetime = str2time(end_time)
    for target in target_list:
        if repo.find(target) != -1:
            if start_datetime <= cur_datetime <= end_datetime:
                if node['issue']['id'] not in comment_issue_ids:
                    comment_issue_ids.append(node['issue']['id'])
                    comment_issues.append(node)
                    break

mdFile = MdUtils(file_name='output', title='Weekly Report')

# Each non-empty bucket becomes its own section with one linked bullet
# per item.
if open_issues:
    mdFile.new_header(level=2, title='Open Issues', add_table_of_contents='n')
    mdFile.new_list([
        'Open Issue ' + mdFile.new_inline_link(link=item['issue']['url'],
                                               text=item['issue']['title'])
        for item in open_issues
    ])

if open_prs:
    mdFile.new_header(level=2, title='Open PRs', add_table_of_contents='n')
    mdFile.new_list([
        'Open PR ' + mdFile.new_inline_link(link=item['pullRequest']['url'],
                                            text=item['pullRequest']['title'])
        for item in open_prs
    ])

if len(review_prs) > 0:
Beispiel #16
0
# Field names used to group and order the bibliography entries; both come
# from the markdown document's front-matter metadata.
GROUPBY = md.metadata.get('groupBy')
SORTBY = md.metadata.get('sortBy')
# %%
# Sort in place (descending) so bib_database.entries itself ends up grouped:
# itertools.groupby further below relies on equal keys being adjacent.
entries = bib_database.entries
entries.sort(key=lambda x: x.get(GROUPBY, ''), reverse=True)
#%%
# Output markdown document named after the target path's stem.
md_file = MdUtils(file_name=OUTPUT_PATH.stem, title=OUTPUT_PATH.stem)


# %%
def format_entry(md_file: MdUtils, entry: Dict):
    """Append one bibliography entry to *md_file*, rendered via FORMAT.

    The entry must at least carry a non-empty 'title'; author, year and
    journal fall back to empty strings when missing.
    """
    assert entry.get('title')
    # Bibtex joins authors with ' and '; present them comma-separated.
    authors = entry.get('author', '').replace(' and', ',')
    line = FORMAT.format(
        TITLE=entry.get('title'),
        AUTHOR=authors,
        YEAR=entry.get('year', ''),
        JOURNAL=entry.get('journal', ''),
    )
    md_file.new_line(line)


#%%
# Emit one H1 section per group value; entries within each group are sorted
# ascending by SORTBY. groupby only merges consecutive equal keys, which the
# descending sort above guarantees.
for key, group in groupby(entries, lambda x: x.get(GROUPBY, '')):
    md_file.new_header(level=1, title=f'{key}')
    for entry in sorted(group, key=lambda x: x.get(SORTBY, '')):
        format_entry(md_file=md_file, entry=entry)
    md_file.new_line()

# %%
md_file.create_md_file()
# %%
# MdUtils writes '<stem>.md' into the CWD; move the result to the requested
# output path.
os.rename(f'./{OUTPUT_PATH.stem}.md', OUTPUT_PATH)
Beispiel #17
0
def generate_report():
    """Build 'report.md' demonstrating sampling, grayscale conversion and
    histogram-balancing binarization on every image in IMAGES.

    Processed variants are saved into an ``<image>_processed`` folder next
    to each source image and embedded into the report under per-operation
    headers.
    """
    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Бинаризация')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')

    for im in IMAGES:
        image_path = f'{folder_helper.IMAGES_FOLDER_PATH}/{im}'

        processed_image_folder_path = f'{image_path}_processed'
        os.makedirs(processed_image_folder_path, exist_ok=True)

        image = Image.open(image_path).convert(constants.RGB_MODE)

        # Sampling
        upsampled_integer_number_of_times_image_path = f'{processed_image_folder_path}/{im}_upsampled_m.png'
        downsampled_integer_number_of_times_image_path = f'{processed_image_folder_path}/{im}_downsampled_n.png'
        oversampled_two_pass_image_path = f'{processed_image_folder_path}/{im}_oversampled_two_pass.png'
        oversampled_one_pass_image_path = f'{processed_image_folder_path}/{im}_oversampled_one_pass.png'

        sampling.bilinear_interpolation_upsampling(
            image,
            UPSAMPLE_FACTOR).save(upsampled_integer_number_of_times_image_path)
        sampling.decimation_downsampling(image, DOWNSAMPLE_FACTOR).save(
            downsampled_integer_number_of_times_image_path)
        # Two-pass resampling: upsample, then downsample in a second pass.
        sampling.decimation_downsampling(
            sampling.bilinear_interpolation_upsampling(image, UPSAMPLE_FACTOR),
            DOWNSAMPLE_FACTOR).save(oversampled_two_pass_image_path)
        sampling.one_pass_resampling(
            image, UPSAMPLE_FACTOR,
            DOWNSAMPLE_FACTOR).save(oversampled_one_pass_image_path)

        # Grayscale
        mean_grayscaled_image_path = f'{processed_image_folder_path}/{im}_mean_grayscaled.png'
        photoshop_grayscaled_image_path = f'{processed_image_folder_path}/{im}_photoshop_grayscaled.png'

        grayscale.mean_grayscale(image).save(mean_grayscaled_image_path)
        grayscale.photoshop_grayscale(image).save(
            photoshop_grayscaled_image_path)

        # Threshold
        balansed_hist_thresholded_image_path = f'{processed_image_folder_path}/{im}_balansed_hist_thresholded.png'

        thresholding.balansed_histogram_method(grayscale.mean_grayscale(
            image)).save(balansed_hist_thresholded_image_path)

        # Report
        report.new_header(level=2, title=f'{im}')
        report.new_header(level=3, title='Исходная картинка')
        report.new_line(
            report.new_inline_image(text='Исходная картинка', path=image_path))

        report.new_header(
            level=3, title=f'Интерполяция с коэффициентом {UPSAMPLE_FACTOR}')
        report.new_line(
            report.new_inline_image(
                text=f'Интерполяция с коэффициентом {UPSAMPLE_FACTOR}',
                path=upsampled_integer_number_of_times_image_path))

        report.new_header(
            level=3, title=f'Децимация с коэффициентом {DOWNSAMPLE_FACTOR}')
        report.new_line(
            report.new_inline_image(
                text=f'Децимация с коэффициентом {DOWNSAMPLE_FACTOR}',
                path=downsampled_integer_number_of_times_image_path))

        report.new_header(
            level=3,
            title=
            f'Двухпроходная передескритизация с коэффициентом {UPSAMPLE_FACTOR}/{DOWNSAMPLE_FACTOR}'
        )
        report.new_line(
            report.new_inline_image(
                text=
                f'Двухпроходная передескритизация с коэффициентом {UPSAMPLE_FACTOR}/{DOWNSAMPLE_FACTOR}',
                path=oversampled_two_pass_image_path))

        report.new_header(
            level=3,
            title=
            f'Однопроходная передескритизация с коэффициентом {UPSAMPLE_FACTOR}/{DOWNSAMPLE_FACTOR}'
        )
        report.new_line(
            report.new_inline_image(
                text=
                f'Однопроходная передескритизация с коэффициентом {UPSAMPLE_FACTOR}/{DOWNSAMPLE_FACTOR}',
                # Fix: this section previously embedded the two-pass image
                # (copy-paste error); it must show the one-pass result.
                path=oversampled_one_pass_image_path))

        report.new_header(level=3, title='Оттенки серого')
        report.new_line(
            report.new_inline_image(text='Оттенки серого',
                                    path=mean_grayscaled_image_path))

        report.new_header(level=3, title='Оттенки серого (как в Photoshop)')
        report.new_line(
            report.new_inline_image(text='Оттенки серого (как в Photoshop)',
                                    path=photoshop_grayscaled_image_path))

        report.new_header(level=3,
                          title='Бинаризация (балансировка гистограммы)')
        report.new_line(
            report.new_inline_image(
                text='Бинаризация (балансировка гистограммы)',
                path=balansed_hist_thresholded_image_path))

    report.create_md_file()
Beispiel #18
0
    def to_markdown(self):
        """Render this feature set as Markdown documentation.

        Builds a document with description, source, sink and feature tables.
        When ``self.save`` is truthy the file is written to disk; otherwise
        the raw markdown text is returned.
        """
        params = self._construct()

        markdown = MdUtils(file_name=params._name)
        markdown.new_header(level=1, title=str(params._name).capitalize())
        markdown.new_header(level=2, title="Description")
        markdown.new_paragraph(params._desc_feature_set)
        markdown.new_line()
        markdown.new_header(level=2, title="Feature Set Pipeline")

        # Source table: header row plus one (reader, location) row each.
        markdown.new_header(level=3, title="Source")
        source_cells = ["Reader", "Location"]
        for reader, location in params._source:
            source_cells += [reader, location]
        markdown.new_table(columns=2,
                           rows=len(source_cells) // 2,
                           text=source_cells,
                           text_align="center")

        # Sink table: a single "Writer" column.
        markdown.new_header(level=3, title="Sink")
        sink_cells = ["Writer"]
        for writer in params._sink:
            sink_cells.append(writer)
        markdown.new_table(columns=1,
                           rows=len(sink_cells),
                           text=sink_cells,
                           text_align="center")

        # Feature table: name / type / description per feature column.
        markdown.new_header(level=3, title="Features")
        feature_cells = ["Column name", "Data type", "Description"]
        for column, desc in params._features:
            feature_cells += [column["column_name"], str(column["type"]), desc]
        markdown.new_table(columns=3,
                           rows=len(feature_cells) // 3,
                           text=feature_cells,
                           text_align="center")

        if self.save:
            markdown.create_md_file()
        else:
            return markdown.file_data_text
def generate_report():
    """Build 'report.md' showing smoothing and Roberts-cross edge detection
    results for every image in IMAGES."""
    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Контуры')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')

    for im in IMAGES:
        image_path = f'{folder_helper.IMAGES_FOLDER_PATH}/{im}'

        out_dir = f'{image_path}_processed'
        os.makedirs(out_dir, exist_ok=True)

        image = Image.open(image_path).convert(constants.RGB_MODE)

        # Destination paths for each processed variant ('#' is a threshold
        # placeholder in the normalized template).
        smoothed_path = f'{out_dir}/{im}_grayscaled.png'
        cross_x_path = f'{out_dir}/{im}_roberts_cross_x.png'
        cross_y_path = f'{out_dir}/{im}_roberts_cross_y.png'
        cross_path = f'{out_dir}/{im}_roberts_cross.png'
        normalized_template = f'{out_dir}/{im}_roberts_cross_normalized_#.png'

        # Smooth the mean-grayscale image, then run the Roberts operators.
        smoothed = filtration.spatial_smoothing(grayscale.mean_grayscale(image))
        smoothed.save(smoothed_path)
        filtration.roberts_cross_x(smoothed).save(cross_x_path)
        filtration.roberts_cross_y(smoothed).save(cross_y_path)
        filtration.roberts_cross(smoothed).save(cross_path)

        for threshold in range(10, 31):
            filtration.roberts_cross_threshold(smoothed, threshold).save(
                normalized_template.replace('#', str(threshold)))

        # Report: one header + inline image per processed variant.
        report.new_header(level=2, title=f'{im}')
        sections = [
            ('Исходная картинка', image_path),
            ('Сглаженные оттенки серого', smoothed_path),
            ('Оператор Робертса 2x2 (x)', cross_x_path),
            ('Оператор Робертса 2x2 (y)', cross_y_path),
            ('Оператор Робертса 2x2', cross_path),
        ]
        for title, path in sections:
            report.new_header(level=3, title=title)
            report.new_line(report.new_inline_image(text=title, path=path))

        report.new_header(level=3,
                          title='Оператор Робертса 2x2 (нормализованная)')
        for threshold in range(10, 31):
            report.new_header(level=4, title=f'Порог {threshold}')
            report.new_line(
                report.new_inline_image(
                    text=f'Порог {threshold}',
                    path=normalized_template.replace('#', str(threshold))))

    report.create_md_file()
Beispiel #20
0
def create_report(res_files: list, output: str, title="", description=""):
    """Merge benchmark result JSON files into one Markdown report.

    Args:
        res_files: paths of JSON result files. All files are expected to
            share the same layout; the first one supplies the platform info
            and the set of tests.
        output: output file name (MdUtils appends the ``.md`` extension).
        title: document title.
        description: optional bold paragraph placed under the title.
    """
    results = []
    for path in res_files:
        with open(path) as json_file:
            results.append(json.load(json_file))

    mdFile = MdUtils(file_name=output, title=title)
    if description:
        mdFile.new_paragraph(description, bold_italics_code='b')

    # Platform table, taken from the first result file.
    mdFile.new_header(level=1, title='Platform')
    platform = results[0]['platform']
    cells = ["System", "Information"]
    rows = 1
    for key, value in platform.items():
        cells.extend([key, str(value)])
        rows += 1
    mdFile.new_line()
    mdFile.new_table(columns=2, rows=rows, text=cells, text_align='left')

    mdFile.new_header(level=1, title='Test results')
    tests = results[0]['tests']
    for test_key in tests:
        mdFile.new_header(level=2, title=test_key)
        # Show the command that produced this test's numbers.
        mdFile.new_line('~~~')
        mdFile.new_line(tests[test_key]['command'])
        mdFile.new_line('~~~')

        test_res = tests[test_key]['result']
        cells = ['Test'] + list(test_res.keys())
        cols = len(cells)
        rows = 1
        # Running sums and counts per result name, for the "(avg)" rows.
        avg = {}
        avgc = {}
        for r in results:
            values = list(r['tests'][test_key]['result'].values())
            cells.extend([r['name'] + " (" + r['iteration'] + ")"] + values)
            rows += 1
            if r['name'] not in avg:
                avg[r['name']] = values
                avgc[r['name']] = 1
            else:
                # Values arrive as strings; accumulate as floats, keep as str.
                avg[r['name']] = [
                    str(float(x) + float(y))
                    for x, y in zip(avg[r['name']], values)
                ]
                avgc[r['name']] += 1

        # Append one averaged row per result name.
        for name in avg:
            avg[name] = [
                str(round(float(x) / avgc[name], 3)) for x in avg[name]
            ]
            cells.extend([name + " (avg)"] + avg[name])
            rows += 1

        mdFile.new_table(columns=cols, rows=rows, text=cells, text_align='left')
        mdFile.new_line()

    mdFile.create_md_file()
    # Fix: message previously misspelled "succesfully".
    print("{}.md successfully created".format(output))
Beispiel #21
0
def generate_report():
    """Build 'report.md' demonstrating linear contrasting and power-law
    (gamma) transformation on every image in IMAGES."""
    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Контрастирование')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')

    # The gamma grid is needed twice (processing and report sections);
    # compute it once instead of re-running numpy.arange + round per loop.
    gammas = [
        round(g, GAMMA_ROUND_SIGNS)
        for g in numpy.arange(START_GAMMA, STOP_GAMMA, STEP_GAMMA)
    ]

    for im in IMAGES:
        image_path = f'{helpers.folder_helper.IMAGES_FOLDER_PATH}/{im}'

        processed_image_folder_path = f'{image_path}_processed'
        os.makedirs(processed_image_folder_path, exist_ok=True)

        # '#' in the power-transform template is replaced with the gamma value.
        grayscaled_image_path = f'{processed_image_folder_path}/{im}_grayscaled.png'
        linear_contrasted_image_path = f'{processed_image_folder_path}/{im}_linear_contrasted.png'
        power_transformed_image_path = f'{processed_image_folder_path}/{im}_power_transformed_gamma=#.png'

        image = Image.open(fp=image_path).convert(constants.RGB_MODE)
        grayscaled = grayscale.mean_grayscale(image)

        grayscaled.save(grayscaled_image_path)

        # Contrasting
        enhancement.contrasting.linear_contrasting(grayscaled).save(
            linear_contrasted_image_path)
        for gamma in gammas:
            enhancement.contrasting.power_transformation(grayscaled, gamma=gamma)\
                .save(power_transformed_image_path.replace('#', str(gamma)))

        # Report
        report.new_header(level=2, title=f'{im}')

        report.new_header(level=3, title='Исходная картинка')
        report.new_line(
            report.new_inline_image(text='Исходная картинка', path=image_path))

        report.new_header(level=3, title='Оттенки серого')
        report.new_line(
            report.new_inline_image(text='Оттенки серого',
                                    path=grayscaled_image_path))

        report.new_header(level=3, title='Линейное контрастирование')
        report.new_line(
            report.new_inline_image(text='Линейное контрастирование',
                                    path=linear_contrasted_image_path))

        report.new_header(level=3, title='Степенное преобразование')
        for gamma in gammas:
            report.new_header(level=4, title=f'Gamma = {gamma}')
            report.new_line(
                report.new_inline_image(
                    text='Степенное преобразование',
                    path=power_transformed_image_path.replace('#',
                                                              str(gamma))))

    report.create_md_file()
Beispiel #22
0
def generate_report():
    """Build 'report.md' with symbol-proximity assessments of PHRASE at two
    font sizes, demonstrating that the normalized features make the result
    independent of font size."""

    def generate_phrase_image(width: int, font: ImageFont) -> Image:
        # Render PHRASE in black on a white grayscale canvas of the given
        # width, then trim empty margins.
        result = Image.new(mode=GRAYSCALE_MODE,
                           size=(width, font.size),
                           color=WHITE)
        result_draw = ImageDraw.Draw(im=result, mode=GRAYSCALE_MODE)
        result_draw.text(xy=(0, 0),
                         text=PHRASE,
                         font=font,
                         fill=0,
                         anchor='lt')
        return cut_empty_rows_and_cols(result)

    def p_ass_for_table(p_ass):
        # Flatten the 2D assessment matrix into the flat list of string
        # cells that MdUtils.new_table expects.
        return [str(element) for row in p_ass for element in row]

    report = MdUtils(file_name='./report.md')
    report.new_header(level=1, title='Классификация')
    report.new_line(text='Выполнил Ахманов Алексей Б18-514')
    report.new_line(text=f'Алфавит - {ALPHABET}')
    report.new_line(text=f'Исходная фраза - {PHRASE}')
    report.new_line(text=f'Размер шрифта - {FONT_SIZE}')

    # Phrase rendered at the regular and the small font size.
    phrase_image = generate_phrase_image(
        1200, ImageFont.truetype(font=FONT_PATH, size=FONT_SIZE))
    phrase_image_small_font = generate_phrase_image(
        1200, ImageFont.truetype(font=FONT_PATH, size=SMALL_FONT_SIZE))

    # Proximity assessment
    p_assessment = proximity_assessment(image=phrase_image,
                                        diff_threshold=SYMBOLS_DIFF_THRESHOLD,
                                        phrase=PHRASE)
    p_assessment_small_font = proximity_assessment(
        image=phrase_image_small_font,
        diff_threshold=SYMBOLS_DIFF_THRESHOLD,
        phrase=PHRASE)

    p_assessment_for_table = p_ass_for_table(p_assessment)
    p_assessment_small_font_for_table = p_ass_for_table(
        p_assessment_small_font)

    # Report
    report.new_header(level=2,
                      title=f'Оценка близости для размера шрифта {FONT_SIZE}')

    rows = len(p_assessment)
    columns = len(p_assessment[0]) if len(p_assessment) > 0 else 0
    report.new_table(columns=columns, rows=rows, text=p_assessment_for_table)

    report.new_header(
        level=2, title=f'Оценка близости для размера шрифта {SMALL_FONT_SIZE}')

    rows = len(p_assessment_small_font)
    columns = len(
        p_assessment_small_font[0]) if len(p_assessment_small_font) > 0 else 0
    report.new_table(columns=columns,
                     rows=rows,
                     text=p_assessment_small_font_for_table)

    report.new_line(
        text=
        'Так как были использованы нормализованные параметры для оценки близости символов, размер шрифта не влияет на результат'
    )

    report.create_md_file()