Example #1
def build_field_values(geol_units: gpd.GeoDataFrame):
    for i in geol_units.columns:
        if i in fields.OMITTED_FIELDS:
            continue
        column = (geol_units[i].value_counts().reset_index().sort_values(
            [i, "index"], ascending=[False, True]).set_index("index"))
        if column.shape[0] > 200:
            continue

        mdfile = mdutils.MdUtils(
            file_name=os.path.join(fp.FIELD_VALS_DIR, f"{i}_values.md"),
            title=f"Unique values of {i}",
        )

        table_header = ["Value", "Number of Occurrences"]
        # mdutils populates a table from one flat list whose length is a
        # multiple of the column count; chain.from_iterable flattens the
        # (value, count) tuples produced by zip()
        table_header.extend(chain.from_iterable(zip(column.index, column[i])))

        mdfile.new_table(columns=2,
                         rows=column.shape[0] + 1,
                         text=table_header,
                         text_align="center")

        mdfile.create_md_file()
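A minimal, self-contained sketch of the flat-list table convention used above (file name and data invented for illustration):

from itertools import chain

import mdutils

# hypothetical (value, count) pairs standing in for a real column summary
pairs = [("granite", 42), ("basalt", 17), ("schist", 3)]

md = mdutils.MdUtils(file_name="example_values", title="Unique values")

# header cells first, then one flattened (value, count) pair per row
cells = ["Value", "Number of Occurrences"]
cells.extend(chain.from_iterable(pairs))

# rows counts the header row, so it is len(pairs) + 1
md.new_table(columns=2, rows=len(pairs) + 1, text=cells, text_align="center")
md.create_md_file()  # writes example_values.md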
Example #2
def build_field_glossary(feature_class: gpd.GeoDataFrame):
    field_descr = pd.read_csv(fp.FIELD_DESCR_PATH).fillna("")
    mdfile = mdutils.MdUtils(file_name=fp.GEOL_GLOSSARY_PATH,
                             author="Samuel Elkind")
    mdfile.new_header(1, title="Geological Units Field Glossary")
    for i in feature_class.columns:
        if i in src.fields.OMITTED_FIELDS:
            continue
        try:
            value_counts = feature_class[i].value_counts()
        except AttributeError:
            continue

        record = field_descr.loc[field_descr["field_name"] == i]
        output = create_output(record, i)

        mdfile.new_header(2, i)
        for j in output:
            if not output[j]:
                continue
            output[j] = format_output(j, output[j], i)
            mdfile.new_line(f"{j}:", bold_italics_code="b")
            mdfile.new_line("")
            mdfile.new_line(text=output[j])
            mdfile.new_line("")

        mdfile.new_line("More Information:", bold_italics_code="b")
        mdfile.new_line()

        stats = create_stats(value_counts)
        mdfile.new_list([f"{k}: {stats[k]}" for k in stats])

    mdfile.create_md_file()
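The glossary entries above combine bold label lines with new_list; a compact sketch of that pattern on invented data:

import mdutils

md = mdutils.MdUtils(file_name="glossary_sketch", author="example")
md.new_header(1, title="Field Glossary")
md.new_header(2, "SOME_FIELD")  # one level-2 section per column

md.new_line("Field Description:", bold_italics_code="b")  # bold label
md.new_line("")
md.new_line(text="Free-text description of the field.")
md.new_line("")

# new_list renders each string as one markdown list item
stats = {"Unique values": 12, "Most common": "granite"}
md.new_list([f"{k}: {v}" for k, v in stats.items()])
md.create_md_file()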
Example #3
def create_markdown(destination_filename=os.path.join(Cfg.destination_dir,
                                                      Cfg.destination_file),
                    with_hints=False,
                    with_solutions=False):
    # Create file name
    if with_hints:
        destination_filename += '_with_hints'
    if with_solutions:
        destination_filename += '_with_solutions'

    # Initialise file
    mdfile = mdutils.MdUtils(file_name=destination_filename)

    # Add headers
    mdfile.write(HEADERS["header"] + '\n')
    mdfile.write(HEADERS["sub_header"] + '\n')

    # Add questions (and hint or answers if required)
    for n in range(1, 101):
        question = QHA[f'q{n}']
        if question.strip() == '':
            continue  # skip if there is no question text
        mdfile.new_header(title=f"{n}. {question}", level=4)
        if with_hints:
            mdfile.write(f"`{QHA[f'h{n}']}`")
        if with_solutions:
            mdfile.insert_code(QHA[f'a{n}'], language='python')

    # Delete file if one with the same name is found
    if os.path.exists(destination_filename):
        os.remove(destination_filename)

    # Write sequence to file
    mdfile.create_md_file()
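A usage sketch for the generator above (it assumes HEADERS and QHA are loaded as in the surrounding module; the hint/solution suffixes are appended by the function itself):

create_markdown()                     # plain exercise sheet
create_markdown(with_hints=True)      # writes ..._with_hints
create_markdown(with_solutions=True)  # writes ..._with_solutions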
Example #4
def create_markdown(destination_filename='100_Numpy_exercises',
                    with_hints=False,
                    with_solutions=False):
    # Create file name
    if with_hints:
        destination_filename += '_with_hints'
    if with_solutions:
        destination_filename += '_with_solutions'

    # Initialise file
    mdfile = mdutils.MdUtils(file_name=destination_filename)

    # Add headers
    mdfile.write(HEADERS["header"] + '\n')
    mdfile.write(HEADERS["sub_header"] + '\n')

    # Add questions (and hint or answers if required)
    for n in range(1, 101):
        mdfile.new_header(title=f"{n}. {QHA[f'q{n}']}", level=4)
        if with_hints:
            mdfile.write(f"`{QHA[f'h{n}']}`")
        if with_solutions:
            mdfile.insert_code(QHA[f'a{n}'], language='python')

    # Delete file if one with the same name is found
    if os.path.exists(destination_filename):
        os.remove(destination_filename)

    # Write sequence to file
    mdfile.create_md_file()
Example #5
def build_quality_info_field_glossary(quality_info: gpd.GeoDataFrame):
    field_descr = pd.read_csv(fp.QUALINFO_FIELD_DESCR_PATH).fillna("")

    mdfile = mdutils.MdUtils(file_name=fp.QUALINFO_GLOSSARY_PATH,
                             author="Samuel Elkind")

    mdfile.new_header(1, title="Quality Information Field Glossary")

    for i in quality_info.columns:
        if i in src.fields.OMITTED_FIELDS:
            continue

        record = field_descr.loc[field_descr["field_name"] == i]

        mdfile.new_header(2, i)

        for j, k in [
            ("field_description", "Field Description"),
            ("value_formatting", "Formatting of Values"),
        ]:
            if not record[j].iloc[0]:
                continue
            mdfile.new_line(f"{k}:", bold_italics_code="b")
            mdfile.new_line("")
            mdfile.new_line(text=record[j].iloc[0])
            mdfile.new_line("")

    mdfile.create_md_file()
Example #6
def build_faults_field_glossary(faults: gpd.GeoDataFrame):
    field_descr = pd.read_csv(fp.FAULTS_FIELD_DESCR_PATH).fillna("")

    mdfile = mdutils.MdUtils(file_name=fp.FAULTS_GLOSSARY_PATH,
                             author="Samuel Elkind")

    mdfile.new_header(1, title="Faults Field Glossary")

    for i in faults.columns:
        if i in src.fields.OMITTED_FIELDS:
            continue

        record = field_descr.loc[field_descr["field_name"] == i]
        output = create_output(record, i)

        mdfile.new_header(2, i)

        for j in output:
            if not output[j] or j == "Field Values":
                continue
            output[j] = format_output(j, output[j], i)
            mdfile.new_line(f"{j}:", bold_italics_code="b")
            mdfile.new_line("")
            mdfile.new_line(text=output[j])
            mdfile.new_line("")

    mdfile.create_md_file()
Example #7
def build_source_field_glossary(sources: gpd.GeoDataFrame):
    field_descr = pd.read_csv(fp.SOURCE_FIELD_DESCR_PATH).fillna("")

    mdfile = mdutils.MdUtils(file_name=fp.SOURCE_GLOSSARY_PATH,
                             author="Samuel Elkind")

    mdfile.new_header(1, title="Sources Field Glossary")

    for i in sources.columns:
        if i in src.fields.OMITTED_FIELDS:
            continue

        record = field_descr.loc[field_descr["field_name"] == i]

        mdfile.new_header(2, i)

        for key, label in [
            ("field_description", "Field Description"),
            ("value_formatting", "Formatting of Values"),
        ]:
            mdfile.new_line(f"{label}:", bold_italics_code="b")
            mdfile.new_line("")
            mdfile.new_line(text=record[key].iloc[0])
            mdfile.new_line("")

    mdfile.create_md_file()
Example #8
def update_index(root_dir, herds):
    index_file_name = os.path.join(root_dir, 'index.md')
    index_md_file = mdutils.MdUtils(
        file_name=index_file_name,
        title='PDS Engineering Node software suite, builds')

    herds.sort(key=lambda x: x.get_release_datetime(), reverse=True)
    herds_iterator = iter(herds)

    now = datetime.now()

    # dev/uit releases (herds are sorted newest first, so planned ones come first)
    table_development_releases = HerdTable(
        ["build", "planned release", "update"])
    herd = None
    for herd in herds_iterator:
        if herd.get_release_datetime() > now:
            table_development_releases.add_herd(herd)
        else:
            break

    # latest stable release: the first herd already released, if any
    table_latest_stable_release = HerdTable(["build", "release", "update"])
    if herd is not None and herd.get_release_datetime() <= now:
        table_latest_stable_release.add_herd(herd)

    # archived releases: everything remaining in the iterator
    table_archived_releases = HerdTable(["build", "release", "update"])
    for herd in herds_iterator:
        table_archived_releases.add_herd(herd)

    if len(table_latest_stable_release):
        index_md_file.new_paragraph("Latest stable release:")
        index_md_file.new_line('')
        table_latest_stable_release.write_to_md_file(index_md_file)

    if len(table_development_releases):
        index_md_file.new_line('')
        index_md_file.new_paragraph("Development releases:")
        index_md_file.new_line('')
        table_development_releases.write_to_md_file(index_md_file)

    if len(table_archived_releases):
        index_md_file.new_line('')
        index_md_file.new_paragraph("Archived stable releases:")
        index_md_file.new_line('')
        table_archived_releases.write_to_md_file(index_md_file)

    img = index_md_file.new_inline_image(
        'new PDS logo test',
        'https://nasa-pds.github.io/pdsen-corral/images/logo.png')
    index_md_file.new_line(img)

    index_md_file.create_md_file()
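The three tables above are sliced out of one sorted list by sharing a single iterator, so nothing is scanned twice; the same technique on plain data (dates invented):

from datetime import datetime, timedelta

now = datetime.now()
# hypothetical release dates, sorted newest first as in update_index()
dates = sorted([now + timedelta(days=d) for d in (30, 7, -1, -90, -365)],
               reverse=True)

it = iter(dates)
upcoming, released = [], []
for d in it:
    if d > now:
        upcoming.append(d)
    else:
        released.append(d)  # first already-released entry
        break
released.extend(it)  # the same iterator resumes where the loop stopped

print(len(upcoming), "planned,", len(released), "released")  # 2 planned, 3 released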
Example #9
    def convert(self,
                destination_file: PosixPath,
                subset_numbered_keys: Optional[str] = None):

        ktx_dict = self.getter.get_dict()
        destination_file = Path(destination_file)

        getter_tag = self.getter.get_getter_tag()
        format_tag = self.get_format_tag()

        # - Initialise file
        md_file = mdutils.MdUtils(file_name=str(destination_file))

        # - Write header if any:
        for hdr_key in self.getter.get_headers_keys():
            prefix, suffix, add_key = keys_to_decorations(
                getter_tag, format_tag, hdr_key)
            if add_key:
                prefix += f"{hdr_key}. "
            md_file.write(prefix + ktx_dict[hdr_key] + suffix)

        # - Write numbered keys if any:
        n_keys = self.getter.get_quantity_numbered_keys()
        numbered_keys = self.getter.get_numbered_keys()

        if isinstance(numbered_keys, dict):
            numbered_keys = numbered_keys[subset_numbered_keys]

        num_numbered_keys_found = 0
        for n in range(n_keys[0], n_keys[1] + 1):
            for key in numbered_keys:
                prefix, suffix, add_key = keys_to_decorations(
                    getter_tag, format_tag, key)
                nmb_key = f"{key}{n}"
                if add_key:
                    prefix += f"{n}. "
                if nmb_key in ktx_dict.keys():
                    num_numbered_keys_found += 1
                    md_file.write(prefix + ktx_dict[nmb_key] + suffix)

        # Delete file if one with the same name is found
        if destination_file.exists():
            destination_file.unlink()

        # Write sequence to file
        md_file.create_md_file()

        print(
            f"File {destination_file} created with {num_numbered_keys_found} numbered keys."
        )
Example #10
def build_bibliography(sources: gpd.GeoDataFrame):
    pub_paper_file = mdutils.MdUtils(file_name=fp.PUB_PAPER_REF_PATH,
                                     author="Samuel Elkind")
    pub_map_file = mdutils.MdUtils(file_name=fp.PUB_MAP_REF_PATH,
                                   author="Samuel Elkind")
    gis_file = mdutils.MdUtils(file_name=fp.GIS_REF_PATH,
                               author="Samuel Elkind")
    thesis_file = mdutils.MdUtils(file_name=fp.THESIS_REF_PATH,
                                  author="Samuel Elkind")
    unpub_file = mdutils.MdUtils(file_name=fp.UNPUB_REF_PATH,
                                 author="Samuel Elkind")
    unk_file = mdutils.MdUtils(file_name=fp.UNK_REF_PATH,
                               author="Samuel Elkind")

    for pubtype, md_file, title in [
        ("Published paper", pub_paper_file, "Published Paper"),
        ("Published map", pub_map_file, "Published Map"),
        ("GIS dataset", gis_file, "GIS Dataset"),
        ("Thesis", thesis_file, "Thesis"),
        ("Unpublished", unpub_file, "Unpublished"),
        ("Unknown", unk_file, "Unknown"),
    ]:
        works = sources[sources["PUBTYPE"] == pubtype].fillna(0)
        works_citations = make_citations(works)

        md_file.new_header(1, title=f"{title} Works Referenced")
        # sort on the text before the first digit (authors), then the rest (year)
        for citation in sorted(
                works_citations,
                key=lambda x: (
                    x[:find_first_digit(x)],
                    x[find_first_digit(x):],  # noqa: E203
                ),
        ):
            md_file.new_header(2, title=citation)

            md_file.new_line("Bibtex citation", bold_italics_code="b")
            md_file.insert_code(works_citations[citation]["bibtex"])

            if pubtype not in ["Unpublished", "Unknown"]:
                md_file.new_line(
                    mdutils.tools.Link.Inline.new_link(
                        link=works_citations[citation]["scholar_link"],
                        text="Google Scholar Link",
                    ),
                    bold_italics_code="b",
                )

        md_file.create_md_file()
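find_first_digit is defined elsewhere in this repo; a plausible minimal version, labelled as an assumption rather than the author's code:

import re

def find_first_digit(s: str) -> int:
    # assumed helper: index of the first digit in s, or len(s) if there
    # is none, so the two slices above degrade to (whole string, "")
    match = re.search(r"\d", s)
    return match.start() if match else len(s)

print(find_first_digit("Smith and Jones 1998"))  # 16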
Example #11
 def on_files(self, files, config):
     for root, dirs, filenames in os.walk("."):
         for filename in filenames:
             if filename.endswith(".yaml"):
                 new_filename = os.path.join(root, filename)
                 markdown_filename = self.generate_yaml_markdown(
                     new_filename, config
                 )
                 if markdown_filename is not None:
                     f = File(
                         markdown_filename,
                         config["docs_dir"],
                         config["site_dir"],
                         False,
                     )
                     files.append(f)
     mdFile = mdutils.MdUtils(
         file_name=config["docs_dir"] + "/" + self.table_filename
     )
     mdFile.new_header(level=1, title="Tests index")
     for table_name, test_table in self.test_tables.items():
         mdFile.new_header(level=2, title='<span class="tag">%s</span>' % table_name)
         mdFile.new_line("| Name | Description | Scope |")
         mdFile.new_line("| --- | --- | --- |")
         for row in sorted(test_table, key=lambda item: item["name"]):
             mdFile.new_line(
                 "| %s | %s | %s |"
                 % (row["name"], row["description"].replace("\n", ""), row["scope"])
             )
         mdFile.new_line("")
     mdFile.create_md_file()
     newfile = File(
         path=str(self.table_filename) + ".md",
         src_dir=config["docs_dir"],
         dest_dir=config["site_dir"],
         use_directory_urls=False,
     )
     files.append(newfile)
     return files
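new_line writes raw text, so the hand-built pipe rows above are a lightweight alternative to new_table when the row count is not known up front; a self-contained sketch with invented rows:

import mdutils

rows = [
    {"name": "test_boot", "description": "Boots the device", "scope": "smoke"},
    {"name": "test_wifi", "description": "Joins a network", "scope": "network"},
]

md = mdutils.MdUtils(file_name="tests_index_sketch")
md.new_header(level=1, title="Tests index")
md.new_line("| Name | Description | Scope |")
md.new_line("| --- | --- | --- |")
for row in sorted(rows, key=lambda item: item["name"]):
    # embedded newlines would break the table, so strip them from each cell
    md.new_line("| %s | %s | %s |"
                % (row["name"], row["description"].replace("\n", ""), row["scope"]))
md.new_line("")
md.create_md_file()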
Example #12
import sys
import json
import mdutils

postman_file = sys.argv[1]

mdFile = mdutils.MdUtils(file_name='Documentation API')

with open(postman_file) as json_file:
    data = json.load(json_file)

    mdFile.new_header(
        level=1,
        title=data['info']['name'])  # style is set 'atx' format by default.
    mdFile.new_paragraph(
        data['info']['description'])  # style is set 'atx' format by default.

    mdFile.new_line('****')

    for route in data['item']:
        mdFile.insert_code("+ " + route['request']['method'] + " " +
                           route['name'],
                           language='diff')
        mdFile.new_paragraph(route['request']['description'])

        try:
            mdFile.new_header(level=2, title="Headers:")
            for header in route['request']['header']:
                mdFile.new_paragraph(">Key: " + header['key'] + "\n" + "| " +
                                     header['value'])
        except KeyError:
            # routes without a header list raise KeyError above; the
            # "Headers:" heading is simply left empty for those routes
            pass

mdFile.create_md_file()
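The try/except above guards routes that carry no header list; an equivalent, exception-free version of the same header section using dict.get (same Postman field names as above):

for route in data['item']:
    headers = route.get('request', {}).get('header', [])
    if headers:
        mdFile.new_header(level=2, title="Headers:")
        for header in headers:
            mdFile.new_paragraph(">Key: " + header['key'] + "\n" +
                                 "| " + header['value'])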
Example #13
def main():
    faults = gpd.read_file(fp.GEOL_PATH, layer="ATA_faults").fillna("")

    field_values = pd.read_csv(fp.RESTRICTED_VALS_PATH).fillna("")

    field_map = {
        "Accuracy": {
            "fields": ["ACCURACY"],
            "fp": fp.ACCURACY_PATH
        },
        "Fault Type": {
            "fields": ["TYPE"],
            "fp": fp.FAULT_TYPE_PATH
        },
        "Fault Sense": {
            "fields": ["DOMSENSE", "SUBSENSE"],
            "fp": fp.FAULT_SENSE_PATH
        },
        "Down-thrown Side": {
            "fields": ["DOWNQUAD"],
            "fp": fp.DOWN_THROWN_PATH
        },
        "Plot Rank": {
            "fields": ["PLOTRANK"],
            "fp": fp.PLOTRANK_PATH
        },
    }

    for i in field_values.columns:
        mdfile = mdutils.MdUtils(file_name=field_map[i]["fp"],
                                 author="Samuel Elkind")
        mdfile.new_header(1, title=f"{i} Restricted Values")

        rel_fields = "Relevant Fields: "
        for k in field_map[i]["fields"]:
            rel_fields += f"{k}, "
        rel_fields = rel_fields[:-2]

        mdfile.new_line(rel_fields)
        mdfile.new_line("")

        qmap_vals = [v for v in field_values[i] if v != ""]
        used_vals = []
        for j in field_map[i]["fields"]:
            used_vals = list(
                set(used_vals + list(field_values[field_values[i].isin(
                    faults[j])][i].unique())))
            geomap_specific = list(
                faults[~faults[j].isin(qmap_vals)][j].unique())

        restricted_list = ["Value", "Value Present in Dataset"]
        for item in qmap_vals + geomap_specific:
            if str(item).strip() in restricted_list:
                continue
            restricted_list.append(str(item).strip())
            if item in used_vals or item == "unknown":
                restricted_list.append("![yes](../assets/checkbox.png)")
            else:
                restricted_list.append("")

        mdfile.new_table(2, len(restricted_list) // 2, restricted_list,
                         "center")

        mdfile.create_md_file()
Example #14
import os

import mdutils

# Load a file not on the path
import runpy
fetcher = runpy.run_path('../notebook/fetcher.py')
fetch_difumo = fetcher['fetch_difumo']

dic = {
    64: "https://osf.io/wjum7/download",
    128: "https://osf.io/n3vba/download",
    256: "https://osf.io/vza2y/download",
    512: "https://osf.io/a23gw/download",
    1024: "https://osf.io/jpdum/download",
}

for n_components in [64, 128, 256, 512, 1024]:
    mdFile = mdutils.MdUtils(file_name=os.path.join('..', str(n_components)))
    mdFile.write('| All {} components |'.format(n_components))
    mdFile.new_line()
    mdFile.write('|:---:|')
    mdFile.new_line()
    img_line = ('| [![All components](imgs/display_maps/{0}.jpg '
                '"All {0} components")]({1})')
    mdFile.write(img_line.format(n_components, dic[n_components]))

    data = fetch_difumo(dimension=n_components)
    annotated_names = data.labels

    iter_line = '| Component {0}: {1} |'
    second_line = '|:---:|'
    iter_third_line = ('| [![Component {0}: {1}]'
                       '({3}/final/{2}.jpg "Component {0}: {1}")]'
                       '({3}/html/{0}.html)|')
Example #15
        md.new_table(columns=len(keys),
                     rows=len(sorted_parts) + 1,
                     text=tabulated)


if __name__ == "__main__":
    parser = ArgumentParser(
        description="Extract parts from .SchLib or .LibPkg")
    parser.add_argument("files", nargs='*')
    parser.add_argument("--output", '-o')
    parser.add_argument("--branch", default='master')
    args = parser.parse_args()

    parts = read_files(args.files, args.branch)
    #print(parts)
    mdFile = mdutils.MdUtils(file_name=args.output, title='')
    mdFile.new_paragraph(
        '⚠️ Automatically generated page. Any changes will be overwritten.')
    mdFile.new_line()
    x = mdFile.create_marker('toc')
    mdFile.new_header(level=1, title='By designator')
    write_designators(
        parts, ['id', 'designator', 'Comment', 'description', 'library'],
        mdFile,
        sort_by='designator')
    mdFile.new_header(level=1, title='By library')
    write_designators(
        parts, ['id', 'designator', 'Comment', 'description', 'library'],
        mdFile,
        sort_by='library_name')
    mdFile.new_header(level=1, title='By part number')
Example #16
    def prepare_report(self, filename):
        """Put content data into an output file"""
        f = mdutils.MdUtils(file_name=filename, title='VFB Content Report ' +
                                                      self.timestamp.date().isoformat())
        f.new_paragraph("Report of content found at ``%s`` on ``%s``"
                        % (self.server[0],
                           self.timestamp.strftime("%a, %d %b %Y %H:%M:%S")))
        f.new_line()
        f.new_line("Ontology Content", bold_italics_code='bic')
        f.new_line()
        anatomy_table_content = ['Anatomy', 'Classes', 'Publications']
        anatomy_table_content.extend(['All Terms',
                                      str(self.all_terms_number),
                                      str(self.all_terms_pubs)])
        anatomy_table_content.extend(['All Nervous System Parts',
                                      str(self.all_nervous_system_number),
                                      str(self.all_nervous_system_pubs)])
        anatomy_table_content.extend(['All Neurons',
                                      str(self.total_neuron_number),
                                      str(self.total_neuron_pub_number)])
        anatomy_table_content.extend(['Characterised Neurons',
                                      str(self.characterised_neuron_number),
                                      str(self.characterised_neuron_pub_number)])
        anatomy_table_content.extend(['Provisional Neurons',
                                      str(self.provisional_neuron_number),
                                      str(self.provisional_neuron_pub_number)])
        anatomy_table_content.extend(['All Nervous System Regions',
                                      str(self.all_region_number),
                                      str(self.all_region_pub_number)])
        anatomy_table_content.extend(['Synaptic Neuropils',
                                      str(self.synaptic_neuropil_number),
                                      str(self.synaptic_neuropil_pub_number)])
        anatomy_table_content.extend(['Neuron Projection Bundles',
                                      str(self.neuron_projection_bundle_number),
                                      str(self.neuron_projection_bundle_pub_number)])
        anatomy_table_content.extend(['Cell Body Rinds',
                                      str(self.cell_body_rind_number),
                                      str(self.cell_body_rind_pub_number)])
        anatomy_table_content.extend(['Sense Organs',
                                      str(self.sense_organ_number),
                                      str(self.sense_organ_pubs)])
        f.new_table(columns=3, rows=11, text=anatomy_table_content, text_align='left')
        f.new_line()
        f.new_line('**%s** formal assertions, of which **%s** are SubClassOf assertions and **%s** are '
                   'other relationship types'
                   % (str(self.all_relationship_number),
                      str(self.isa_relationship_number),
                      str(self.non_isa_relationship_number)))
        f.new_line()
        f.new_line('**%s** formal assertions on nervous system components, of which **%s** are SubClassOf '
                   'assertions and **%s** are other relationship types'
                   % (str(self.ns_all_relationship_number),
                      str(self.ns_isa_relationship_number),
                      str(self.ns_non_isa_relationship_number)))

        f.new_line()
        f.new_line("Image Content", bold_italics_code='bic')
        f.new_line()
        f.new_line('**%s** total images from **%s** datasets'
                   % (str(self.all_image_number),
                      str(self.all_image_ds_number)))
        f.new_line('**%s** single neuron images of **%s** cell types'
                   % (str(self.single_neuron_image_number),
                      str(self.single_neuron_image_type_number)))
        f.new_line('**%s** images of expression patterns of **%s** drivers'
                   % (str(self.exp_pattern_number),
                      str(self.split_exp_pattern_driver_number)))
        f.new_line('**%s** images of expression patterns of **%s** split combinations'
                   % (str(self.split_image_number),
                      str(self.split_image_driver_number)))

        f.new_line()
        f.new_line("Annotations", bold_italics_code='bic')
        f.new_line()
        f.new_line('**%s** annotations recording **%s** types of neurons that **%s** specific '
                   'split combinations are expressed in.'
                   % (str(self.split_neuron_annotations_annotation_number),
                      str(self.split_neuron_annotations_neuron_number),
                      str(self.split_neuron_annotations_split_number)))
        f.new_line('**%s** annotations recording **%s** types of anatomical structures that '
                   '**%s** specific driver lines are expressed in.'
                   % (str(self.driver_anatomy_annotations_annotation_number),
                      str(self.driver_anatomy_annotations_anatomy_number),
                      str(self.driver_anatomy_annotations_EP_number)))
        f.new_line('**%s** annotations recording **%s** types of neurons that '
                   '**%s** specific driver lines are expressed in.'
                   % (str(self.driver_neuron_annotations_annotation_number),
                      str(self.driver_neuron_annotations_neuron_number),
                      str(self.driver_neuron_annotations_EP_number)))

        f.new_line()
        f.new_line("Connectivity", bold_italics_code='bic')

        f.new_line()
        connectivity_table_content = ['Neuron', 'Number of Neurons', 'Input/Output Entity',
                                      'Number of Entities', 'Connections']
        connectivity_table_content.extend(['Any neuron (individuals)', str(self.neuron_connections_neuron_number),
                                           'Any neuron (individuals)', str(self.neuron_connections_neuron_number),
                                           str(self.neuron_connections_connection_number)])
        connectivity_table_content.extend(['Any neuron (individuals)', str(self.region_connections_neuron_number),
                                           'Region (individuals)', str(self.region_connections_region_number),
                                           str(self.region_connections_connection_number)])
        connectivity_table_content.extend(['Any neuron (classes)', str(self.muscle_connections_neuron_number),
                                           'Muscle (classes)', str(self.muscle_connections_muscle_number),
                                           str(self.muscle_connections_connection_number)])
        connectivity_table_content.extend(['Any neuron (classes)', str(self.sensory_connections_neuron_number),
                                           'Sense organ (classes)', str(self.sensory_connections_sense_organ_number),
                                           str(self.sensory_connections_connection_number)])

        f.new_table(columns=5, rows=5, text=connectivity_table_content, text_align='left')
        f.new_line()

        f.create_md_file()
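new_table fills its flat text list row by row, so len(text) must equal columns × rows, counting the header row; a minimal sketch of the invariant behind the two tables above (numbers invented):

import mdutils

md = mdutils.MdUtils(file_name="table_invariant_sketch")

header = ['Anatomy', 'Classes', 'Publications']
body_rows = [
    ['All Terms', '120', '45'],
    ['All Neurons', '80', '30'],
]

cells = list(header)
for row in body_rows:
    cells.extend(row)

rows = len(body_rows) + 1          # +1 for the header row
assert len(cells) == 3 * rows      # the invariant new_table enforces

md.new_table(columns=3, rows=rows, text=cells, text_align='left')
md.create_md_file()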
Example #17
 def generate_yaml_markdown(self, filename, config):
     # remove leading ./
     new_filename = filename.split("/", 1)[1]
     # remove .yaml
     new_filename = new_filename.rsplit(".", 1)[0]
     tmp_filename = os.path.join(config["docs_dir"], new_filename)
     filecontent = None
     try:
         with open(filename, "r") as f:
             filecontent = f.read()
     except FileNotFoundError:
         return None
     try:
         content = yaml.load(filecontent, Loader=yaml.Loader)
         if "metadata" in content.keys():
             metadata = content["metadata"]
             mdFile = mdutils.MdUtils(file_name=tmp_filename)
             tags_section = "---\n"
             tags_section += "title: %s\n" % metadata["name"]
             tags_section += "tags:\n"
             scope_list = metadata.get("scope", [])
             os_list = metadata.get("os", [])
             device_list = metadata.get("devices", [])
             if scope_list is not None:
                 for item in scope_list:
                     tags_section += " - %s\n" % item
             tags_section += "---\n"
             mdFile.new_header(level=1, title=new_filename)
             mdFile.new_header(level=2, title="Description")
             mdFile.write(metadata["description"])
             mdFile.new_header(level=2, title="Maintainer")
             maintainer_list = metadata.get("maintainer", None)
             if maintainer_list is not None:
                 for item in maintainer_list:
                     mdFile.new_line(" * %s" % item)
             self.__add_list_with_header(mdFile, "OS", os_list)
             self.__add_list_with_header(mdFile, "Scope", scope_list)
             self.__add_list_with_header(mdFile, "Devices", device_list)
             mdFile.new_header(level=2, title="Steps to reproduce")
             steps_list = content["run"]["steps"]
             for line in steps_list:
                 bullet_string = " * "
                 if str(line).startswith("#"):
                     bullet_string = " * \\"
                 mdFile.new_line(bullet_string + str(line))
             try:
                 os.makedirs(os.path.dirname(tmp_filename))
             except OSError as exc:  # Guard against race condition
                 if exc.errno != errno.EEXIST:
                     raise
             md_file = MarkDownFile(mdFile.file_name)
             md_file.rewrite_all_file(
                 data=tags_section
                 + mdFile.title
                 + mdFile.table_of_contents
                 + mdFile.file_data_text
             )
             # add row to tests_table
             table_key = None
             for table_name in self.table_dirs:
                 if new_filename.startswith(table_name):
                     table_key = table_name
             if table_key is not None:
                 self.test_tables[table_key].append(
                     {
                         "name": "[%s](%s.md)" % (metadata["name"], new_filename),
                         "description": metadata["description"],
                         "scope": ", ".join(
                             [
                                 "[%s](tags.md#%s)"
                                 % (x, x.lower().replace(" ", "-").replace("/", ""))
                                 for x in scope_list
                             ]
                         ),
                     }
                 )
             return new_filename + ".md"
     except yaml.YAMLError:
         return None
     except KeyError:
         return None
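The MarkDownFile rewrite at the end exists because MdUtils always emits its own title block first, so YAML front matter has to be prepended by rewriting the finished file; a minimal sketch (the MarkDownFile import path is an assumption based on the mdutils package layout):

import mdutils
from mdutils.fileutils.fileutils import MarkDownFile  # assumed import path

md = mdutils.MdUtils(file_name="fronted_page")
md.new_header(level=1, title="fronted_page")
md.write("Body text.")

front_matter = "---\ntitle: fronted page\ntags:\n - demo\n---\n"

# rewrite_all_file replaces the file's entire contents, so the whole
# document is re-emitted with the front matter prepended
out = MarkDownFile(md.file_name)
out.rewrite_all_file(data=front_matter + md.title + md.table_of_contents
                     + md.file_data_text)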
Example #18
import numpy as np
import mdutils

# Load a file not on the path
import runpy
fetcher = runpy.run_path('../../notebook/fetcher.py')
fetch_difumo = fetcher['fetch_difumo']

n_components = 64
labels = fetch_difumo(dimension=n_components).labels

title = "Structures related to DiFuMo 64 {0}"

write_line = '![{0}]({0}.jpg "{1}")'

link_back_line = ("[Go back to corresponding component]"
                  "(https://parietal-inria.github.io/DiFuMo/64/html/{0}.html)")

for idx, label in zip(np.arange(n_components) + 1, labels['Difumo_names']):
    mdFile = mdutils.MdUtils(file_name='component_' + str(idx))
    title_ = title.format(label)
    mdFile.write("## " + title_)
    mdFile.new_paragraph(write_line.format(idx, title_))
    mdFile.new_paragraph(link_back_line.format(idx))
    mdFile.create_md_file()
Example #19
import mdutils
import os

dic = {
    64: "https://osf.io/wjum7/download",
    128: "https://osf.io/n3vba/download",
    256: "https://osf.io/vza2y/download",
    512: "https://osf.io/a23gw/download",
    1024: "https://osf.io/jpdum/download",
}

mdFile = mdutils.MdUtils(file_name=os.path.join('..', 'index'))

mdFile.write('# Dictionaries of multiple dimensions')

link_location = "https://parietal-inria.github.io/DiFuMo/{0}"
start_line = '[![{0} dimensions](imgs/front/{0}.jpg "{0} dimensions")]({1})'
# start_line = '![{0} dimensions](imgs/front/{0}.jpg "{0} dimensions")'
next_line = ('See regions for: [{0} dimensions]({0} "Labels '
             'for {0} dimensions") &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;')

# Iteration
for n in [64, 128, 256, 512, 1024]:
    link_location = ("https://parietal-inria.github.io/"
                     "DiFuMo/{0}".format(n))
    mdFile.new_paragraph(start_line.format(n, link_location))
    mdFile.new_paragraph(next_line.format(n))
    mdFile.write('[Download]({0})'.format(dic[n]))

mdFile.create_md_file()
Example #20
#!/usr/local/bin/python3

import pprint
import boto3
import mdutils
import json

# setup some generic stuff, boto object, pprint
s3 = boto3.client('s3', region_name="us-west-1")
response = s3.list_buckets()
pp = pprint.PrettyPrinter(indent=4)

# lets frame up the markdown document

mdDoc = mdutils.MdUtils(file_name='s3-bucket-inventory',
                        title='S3 Bucket Inventory')

#print('\n*** mapping buckets ***\n')
for bucket in response['Buckets']:
    n = bucket["Name"]
    #print("\n# %s \n" % n)
    mdDoc.new_header(level=1, title=str(n))

    # first lets check bucket policy
    mdDoc.new_header(level=2, title="Bucket Policy")
    try:
        bucket_policy = s3.get_bucket_policy(Bucket=n)
        statement = json.loads(bucket_policy["Policy"])
        # strip the outer list brackets from the statement list for display
        temp = str(statement["Statement"])
        end = len(temp) - 1
        r = temp[1:end]
        # completion of the truncated original: render the policy statements
        mdDoc.insert_code(r, language='json')
    except s3.exceptions.ClientError:
        # buckets with no policy raise an error instead of returning empty
        mdDoc.new_paragraph("No bucket policy set.")

mdDoc.create_md_file()