def generate_component_documentation_pages(component_sets):
    for cset in component_sets:
        src_dir = f"documentation/{cset['type']}/components"
        components = get_components(src_dir)
        for component in components:
            jinja_input_path = f"examples/{cset['type']}/components"
            documentation_path = f"{src_dir}/{component}/README.md"
            if os.path.isfile(documentation_path):
                documentation = Frontmatter.read_file(documentation_path)
                # render the documentation page for the component
                render(
                    f"{cset['dest']}/{component}/index.html",
                    component_template,
                    rendered_markdown=md.convert(documentation["body"]),
                    section=cset["dest"],
                )
                extras = {
                    "display_map": is_displaying_map(documentation),
                }
                if reqs_org_data is not None:
                    extras["organisation_data"] = organisations
                # render all examples for component
                render_example_pages(
                    component, src_dir, cset["dest"], jinja_input_path, **extras
                )
            else:
                print(f"No documentation for component: {component}")
def generate_template_documentation_pages(directory):
    for p in os.listdir(directory):
        output_root = directory.replace(
            "documentation/digital-land/templates", "template"
        )
        if os.path.isdir(os.path.join(directory, p)):
            generate_template_documentation_pages(os.path.join(directory, p))
        elif p.endswith(".md"):
            documentation = Frontmatter.read_file(f"{directory}/{p}")
            dest = (
                os.path.join(output_root, "index.html")
                if p == "index.md"
                else os.path.join(output_root, p.replace(".md", ""), "index.html")
            )
            # render the documentation page for the template
            render(
                dest,
                component_template,
                rendered_markdown=md.convert(documentation["body"]),
                section="template",
            )
        else:
            include_path = os.path.join(directory, p).replace(
                "documentation", "examples"
            )
            render(
                os.path.join(output_root, p),
                example_template,
                partial_name=include_path,
            )
import sqlite3

import markdown


def add_post(path):
    post = Frontmatter.read_file(path)
    body = markdown.markdown(post['body'])
    sql = ('INSERT INTO post(title, description, body) '
           'VALUES (?, ?, ?)')
    try:
        post_ref = insert_db(sql, (post['attributes']['title'],
                                   post['attributes']['description'],
                                   body))
    except sqlite3.IntegrityError:
        print('Failed to add duplicate post')
        return False

    tags = post['attributes']['tags']
    for tag in tags:
        tag_ref = None
        query = 'SELECT tag.id FROM tag WHERE tag.name = ?'
        found_tag = query_db(query, [tag])
        if not found_tag:
            # tag doesn't exist yet; create it
            sql = ('INSERT INTO tag(name) '
                   'VALUES (?)')
            tag_ref = insert_db(sql, [tag])
        else:
            tag_ref = found_tag[0]['id']
        sql = ('INSERT INTO post_tag(post_ref, tag_ref) '
               'VALUES (?, ?)')
        insert_db(sql, [post_ref, tag_ref])
    return True
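# The insert_db/query_db helpers called by add_post aren't shown in this
# excerpt. A minimal self-contained sketch of the sqlite3 pattern they imply;
# the DATABASE path and exact signatures are assumptions, not the original code:
import sqlite3

DATABASE = 'blog.db'  # assumed path, not from the original


def insert_db(sql, args=()):
    # Execute an INSERT and return the new row's id.
    with sqlite3.connect(DATABASE) as db:
        cur = db.execute(sql, args)
        return cur.lastrowid


def query_db(sql, args=()):
    # Execute a SELECT and return rows as dict-like mappings,
    # so found_tag[0]['id'] works as add_post expects.
    with sqlite3.connect(DATABASE) as db:
        db.row_factory = sqlite3.Row
        return db.execute(sql, args).fetchall()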
def is_yaml_exist(filepath):
    post = Frontmatter.read_file(filepath)
    # TODO: also check the "Title:" node inside the frontmatter object
    return bool(post["frontmatter"])
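# A sketch of the TODO above: treat frontmatter as valid only when it also
# carries a title. The "title" key name is an assumption based on the other
# snippets in this collection; the helper name is hypothetical.
def has_valid_frontmatter(filepath):
    post = Frontmatter.read_file(filepath)
    # attributes can be None when no frontmatter block is present
    attrs = post["attributes"] or {}
    return bool(post["frontmatter"]) and "title" in attrs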
def get_update_content(filename):
    file_content = Frontmatter.read_file(filename)
    return {
        "name": file_content["attributes"].get("name"),
        "type": file_content["attributes"].get("type"),
        "date": file_content["attributes"].get("date"),
        "frontmatter": file_content["attributes"],
        "body": file_content["body"],
    }
def get_project_content(filename):
    file_content = Frontmatter.read_file(filename)
    return {
        "name": file_content["attributes"].get("name"),
        "status": file_content["attributes"].get("status"),
        "description": file_content["attributes"].get("one-liner"),
        "frontmatter": file_content["attributes"],
        "body": file_content["body"],
    }
from datetime import date
from pathlib import Path
from typing import Optional


def get_date_from_frontmatter(f: Path, field) -> Optional[date]:
    try:
        data = Frontmatter.read_file(f)
        value = data['attributes'][field]
        # exact type check on purpose: a datetime (which subclasses date)
        # should fall through to the .date() conversion below
        if type(value) is date:
            return value
        return value.date()
    except Exception:
        print(f'Couldn\'t get "{field}" for file {f}')
        return None
def render_project_content_pages(project):
    content_dir = f"{project_dir}{project}/content"
    md_files = markdown_files_only(os.listdir(content_dir))
    for f in md_files:
        file_content = Frontmatter.read_file(f"{content_dir}/{f}")
        html = compile_markdown(md, file_content["body"])
        render(
            f"{project}/{f.replace('.md', '')}/index.html",
            content_template,
            content=html,
            toc=md.toc_tokens,
            fm=file_content["attributes"],
            project=project,
        )
def update_breadcrumb(url):
    parse_result = parse_state(url)
    if "title" in parse_result:
        title = parse_result["title"][0]
        try:
            filename = glob_re(f"{title}.md", "../blog")[0]
        except IndexError:  # no matching post found
            raise PreventUpdate
        fm_dict = Frontmatter.read_file("../blog/" + filename)
        fm_dict["filename"] = filename.split(".md")[0]
        return fm_dict["attributes"]["title"]
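# glob_re isn't defined in these excerpts. A minimal sketch consistent with
# how it is called here and below (regex pattern, directory) -> list of
# matching filenames; the behaviour is an assumption, not the original helper:
import os
import re


def glob_re(pattern, directory):
    # Return the filenames in `directory` whose names match `pattern`.
    return [f for f in os.listdir(directory) if re.match(pattern, f)]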
def generate_guidance_page(root):
    page_content = Frontmatter.read_file(f"{root}/index.md")
    html = markdown_compile(page_content["body"])
    # strip 'content/' from the output path
    dist = root.replace("content/", "")
    content = {"main": html}
    if page_content["attributes"].get("hasContents"):
        content["contents"] = get_contents_section()
    render(
        f"{dist}/index.html",
        guidance_template,
        content=content,
        fm=page_content["attributes"],
    )
def get_blog_list():
    filelist = []
    for filename in os.listdir(CONTENT_DIR):
        if filename.endswith('.md'):
            post = Frontmatter.read_file(CONTENT_DIR + filename)
            filelist.append({
                "abbrlink": post['attributes']['abbrlink'],
                "title": post['attributes']['title'],
                "date": time.mktime(post['attributes']['date'].timetuple()) * 1000
            })
        else:
            print("Error: get_blog_list")
    # sort once after the loop, newest first
    # (the original re-sorted on every append)
    filelist.sort(key=lambda r: r["date"], reverse=True)
    return jsonify(filelist), 200
def get_salt_list():
    booklist = []
    for bookname in os.listdir(SALT_DIR):
        if bookname.endswith('.md'):
            book = Frontmatter.read_file(SALT_DIR + bookname)
            booklist.append({
                "title": book['attributes']['title'],
                "author": book['attributes']['author'],
                "notenum": book['attributes']['num'],
                "rating": book['attributes']['rating'],
                "tag": book['attributes']['tags'],
                "id": book['attributes']['id']
            })
        else:
            print("Error: get_salt_list")
    # sort once after the loop (the original re-sorted on every append)
    booklist.sort(key=lambda r: r["id"], reverse=True)
    return jsonify(booklist), 200
def update_content(url):
    parse_result = parse_state(url)
    if "title" in parse_result:
        title = parse_result["title"][0]
        try:
            filename = glob_re(f"{title}.md", "../blog")[0]
        except IndexError:  # no matching post found
            raise PreventUpdate
        blog_post = Frontmatter.read_file("../blog/" + filename)
        blog_post["filename"] = filename.split(".md")[0]
        return [
            html.A(
                html.H2(blog_post["attributes"]["title"]),
                href=f"/blog?post={blog_post['filename']}",
                id=blog_post["filename"],
            ),
            html.Hr(),
            html.P(
                [
                    " by ",
                    blog_post["attributes"]["author"],
                    ", ",
                    humanize.naturaltime(
                        datetime.now()
                        - datetime.strptime(
                            blog_post["attributes"]["date"], "%Y-%m-%d")),
                ],
                className="subtitle mt-0 text-muted small",
            ),
            # render raw HTML posts directly, everything else as Markdown
            dash_dangerously_set_inner_html.DangerouslySetInnerHTML(
                blog_post["body"])
            if "type" in blog_post["attributes"]
            and blog_post["attributes"]["type"] == "html"
            else dcc.Markdown(blog_post["body"]),
        ]
def component_news_4col():
    filenames = glob_re(r".*.md", "../blog")
    blog_posts = []
    for filename in filenames:
        fm_dict = Frontmatter.read_file("../blog/" + filename)
        fm_dict["filename"] = filename.split(".md")[0]
        blog_posts.append(fm_dict)

    # Sort by date, newest first
    blog_posts = sorted(blog_posts,
                        key=lambda x: x["attributes"]["date"],
                        reverse=True)

    # Show the five most recent posts
    body = []
    for i in range(min(len(blog_posts), 5)):
        blog_post = blog_posts[i]
        blog_timedelta = humanize.naturaltime(
            datetime.now()
            - datetime.strptime(blog_post["attributes"]["date"], "%Y-%m-%d"))
        body.extend([
            html.Div(blog_timedelta,
                     className="subtitle mt-0 text-muted small"),
            html.A(
                html.P(blog_post["attributes"]["title"], className="lead"),
                href=f"/blog/post?title={blog_post['filename']}",
            ),
        ])
    return dbc.Col(
        [html.H3("Latest News")] + body
        + [html.A(html.P("View all posts"), href="/blog")],
        lg=4,
    )
        num_errs += 1
    return num_errs


errs = 0
dict = {}  # note: shadows the built-in `dict`
ids = []
print("Checking notes frontmatter...\n")
# Find all files and subdirectories
for fname in glob.glob(path + "/**/*", recursive=True):
    # Skip directories
    if os.path.isdir(fname):
        continue
    # Parse file's front matter
    fmatter = Frontmatter.read_file(fname)
    err = check_frontmatter(fmatter)
    errs += err
    if err == 0:
        id = fmatter['attributes']["id"]
        if id in dict:
            # Check if id already exists
            print(id_err_str.format(err="id already exists", id=id,
                                    f1=dict[id]["filename"], f2=fname))
            errs += 1
        else:
            with open(fname, "r") as file:  # Build dictionary of file frontmatters
                link_list = []
                line_num = 0
                for line in file:
                    line_num += 1
                    # Find all markdown links on the line
                    links = re.findall(r'\[[^\[\]\)\(]*\]\([^\[\]\)\(]*\)', line)
                    if links != []:
#%%
from frontmatter import Frontmatter

post = Frontmatter.read_file(
    '../saturn-drmtest.github.io/posts/02tabs/2017-08-20-travel-is-meaningful.md'
)
# %%
post
# %%
post['body']
# %%
import markdown

htmlres = markdown.markdown(post['body'])
htmlres
# %%
post2 = Frontmatter.read_file(
    '../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-23-放言五首.md')
post2['body']
# %%
htmlres2 = markdown.markdown(post2['body'])
htmlres2
# %%
outputFile = open('testoutput.html', 'w',
def get_book_info_douban(bookdir):
    book = Frontmatter.read_file(bookdir)
    url = (DOUBAN_BOOK_API + "search?q=" + book['attributes']['title']
           + "&apikey=" + APIKEY)
    raw_info = requests.get(url)
    rating = 'N/A'
    tags = []
    if raw_info.status_code == 200:
        info = raw_info.json()
        info = info['books'][0]
        rating = info['rating']['average']
        raw_tag = info['tags']
        for t in raw_tag:
            if (t['title'] != book['attributes']['title']
                    and t['title'] != book['attributes']['author']):
                tags.append(t['title'])
    final_rating_str = 'rating: ' + str(rating) + "\n"
    final_tags_str = "tags: \n"
    for t in tags:
        final_tags_str += "- " + t + "\n"

    # update rating in place if present, otherwise insert it after 'num:'
    if 'rating' in book['attributes']:
        curr_file_r = open(bookdir, 'r')
        lines = curr_file_r.readlines()
        for i, item in enumerate(lines):
            if item.startswith('rating:'):
                lines[i] = final_rating_str
                break
        # rewrite lines back
        curr_file_r.close()
        curr_file_w = open(bookdir, 'w')
        for l in lines:
            curr_file_w.write(l)
        curr_file_w.close()
    else:
        curr_file_r = open(bookdir, 'r')
        lines = curr_file_r.readlines()
        for i, item in enumerate(lines):
            if item.startswith('num:'):
                lines.insert(i + 1, final_rating_str)
        curr_file_r.close()
        curr_file_w = open(bookdir, 'w')
        for l in lines:
            curr_file_w.write(l)
        curr_file_w.close()

    # update tags in place if present, otherwise insert them after 'rating:'
    if 'tags' in book['attributes']:
        curr_file_r = open(bookdir, 'r')
        lines = curr_file_r.readlines()
        for i, item in enumerate(lines):
            if item.startswith('tags:'):
                # drop the existing "- ..." tag lines before replacing
                to_be_removed = []
                for j in range(i + 1, len(lines)):
                    if lines[j].startswith("- "):
                        to_be_removed.append(lines[j])
                    else:
                        break
                for r in to_be_removed:
                    lines.remove(r)
                lines[i] = final_tags_str
        curr_file_r.close()
        curr_file_w = open(bookdir, 'w')
        for l in lines:
            curr_file_w.write(l)
        curr_file_w.close()
    else:
        curr_file_r = open(bookdir, 'r')
        lines = curr_file_r.readlines()
        for i, item in enumerate(lines):
            if item.startswith('rating:'):
                lines.insert(i + 1, final_tags_str)
        curr_file_r.close()
        curr_file_w = open(bookdir, 'w')
        for l in lines:
            curr_file_w.write(l)
        curr_file_w.close()
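# The function above repeats the same read-modify-write dance four times.
# A sketch of a helper that could collapse that repetition, using context
# managers; this is a suggested refactor, not part of the original code:
def rewrite_lines(path, transform):
    # Read all lines, apply `transform` (list -> list), write the result back.
    with open(path, 'r') as f:
        lines = f.readlines()
    with open(path, 'w') as f:
        f.writelines(transform(lines))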
def __init__(self, filePath):
    self.MDFilePath = filePath
    self.MDINFOs = Frontmatter.read_file(self.MDFilePath)
    self.headINFODict = self.MDINFOs['attributes']
    self.bodyINFOStr = self.MDINFOs['body']
from frontmatter import Frontmatter

authors = Frontmatter.read_file('article_archive/authors.yml')['attributes']
role = Role.query.filter_by(name="Writer").first()
for author in authors:
    print(author)
    if not User.query.filter_by(username=author).first():
        u = User(username=author,
                 first_name=authors[author]['name'].split(' ')[0],
                 last_name=authors[author]['name'].split()[1],
                 email=authors[author]['email'],
                 role=role,
                 about_me=authors[author].get('bio'))
        db.session.add(u)
        db.session.commit()
def body(value):
    # Find page number
    parse_result = parse_state(value)
    if "page" not in parse_result:
        parse_result["page"] = ["1"]
    page_int = int(parse_result["page"][0])

    # Load blog posts
    filenames = glob_re(r".*.md", "../blog")
    n_posts = len(filenames)
    blog_posts = []
    for filename in filenames:
        fm_dict = Frontmatter.read_file("../blog/" + filename)
        fm_dict["filename"] = filename.split(".md")[0]
        blog_posts.append(fm_dict)

    # Sort by date
    blog_posts = sorted(blog_posts,
                        key=lambda x: x["attributes"]["date"],
                        reverse=True)

    # Render post previews
    h = html2text.HTML2Text()
    h.ignore_links = True
    n_posts_per_page = 5
    start = (page_int - 1) * n_posts_per_page
    end = min(page_int * n_posts_per_page, n_posts)
    body = []
    for i in range(start, end):
        blog_post = blog_posts[i]
        if ("type" in blog_post["attributes"]
                and blog_post["attributes"]["type"] == "html"):
            body_html = blog_post["body"]
        else:
            body_html = markdown2.markdown(blog_post["body"],
                                           extras=markdown_extras)
        preview = textwrap.shorten(h.handle(body_html), 280, placeholder="...")
        body.append(
            dbc.Row(
                dbc.Col(
                    [
                        html.A(
                            html.H2(
                                blog_post["attributes"]["title"],
                                style={"padding-top": "8px"},
                            ),
                            href=f"post?title={blog_post['filename']}",
                            id=blog_post["filename"],
                        ),
                        html.P(
                            [
                                " by ",
                                blog_post["attributes"]["author"],
                                ", ",
                                humanize.naturaltime(
                                    datetime.now()
                                    - datetime.strptime(
                                        blog_post["attributes"]["date"],
                                        "%Y-%m-%d",
                                    )),
                            ],
                            className="subtitle mt-0 text-muted small",
                        ),
                        html.Div(preview, style={"padding-bottom": "8px"}),
                        html.A(
                            html.P(
                                html.Strong(
                                    "Read more",
                                    className="text-left",
                                ),
                                style={"padding-bottom": "24px"},
                            ),
                            href=f"post?title={blog_post['filename']}",
                        ),
                        html.Hr(),
                    ],
                    lg=8,
                )))

    # Add bottom navigation:
    # Previous | Page X of Y | Earlier
    n_pages = math.ceil(n_posts / n_posts_per_page)
    body.append(
        dbc.Row([
            dbc.Col(
                html.A(
                    html.P("< Previous Posts"),
                    id="previous_link",
                    href=f"?page={page_int+1}",
                    className="text-left",
                ) if page_int < n_pages else [],
                lg=2,
            ),
            dbc.Col(
                html.P(
                    f"Page {page_int} of {n_pages}",
                    className="text-center",
                ),
                lg=4,
            ),
            dbc.Col(
                html.A(
                    html.P("Earlier Posts >"),
                    id="previous_link",  # note: duplicates the id above
                    href=f"?page={page_int-1}",
                    className="text-right",
                ) if page_int > 1 else [],
                lg=2,
            ),
        ]))
    return body
cacheFileName = "./meetings_index/actionItemProcessedPosts.cache"
indices = MDIndices(cacheFileName)

for fname in glob.glob("meetings/**/*.md", recursive=True):
    # if fname.startswith("index\\"):
    #     continue
    # if fname.startswith("template\\"):
    #     continue
    print(f'Processing file: {fname}')
    post = Frontmatter.read_file(fname)
    attrs = post['attributes']
    if attrs is None:
        continue
    # for i, k in enumerate(attrs):
    #     print(f'{k}: {attrs[k]}')
    mdpost = MDPost(fname, attrs)
    indices.addPost(mdpost)

with open("./meetings_index/ix_people.md", "w") as f_people:
    indices.postProcessIndex(f_people, "People", indices.personIndex)
with open("./meetings_index/ix_tags.md", "w") as f_tags:
    indices.postProcessIndex(f_tags, "Tags", indices.tagIndex)
app = Flask(__name__)
CORS(app)

CONTENT_DIR = "docs/blog/"
SALT_DIR = "docs/salt/"
INFO_LIST = {"title", "date"}
BLOG_LIST = {}
BOOK_LIST = {}
TOTAL_NOTE_NUM = 0  # will be updated later

# load all docs
for filename in os.listdir(CONTENT_DIR):
    if filename.endswith('.md'):
        post = Frontmatter.read_file(CONTENT_DIR + filename)
        BLOG_LIST.update({
            post['attributes']['abbrlink']: {
                "title": post['attributes']['title'],
                "date": time.mktime(post['attributes']['date'].timetuple()) * 1000,
                "body": post['body']
            }
        })
    else:
        print("Error: initial load on docs")

# load book docs
for bookname in os.listdir(SALT_DIR):
    if bookname.endswith('.md'):
        book = Frontmatter.read_file(SALT_DIR + bookname)
#%%
from frontmatter import Frontmatter
import markdown

fp = 'testtemplate.md'
# %%
head_body = Frontmatter.read_file(fp)
type(head_body['body'])
# %%
bodyhtml = markdown.markdown(head_body['body'],
                             extensions=['toc', 'tables', 'fenced_code'])
# bodyhtml = markdown.markdown(head_body['body'],
#                              extensions=['toc', 'tables', 'fenced_code', 'codehilite'])
bodyhtml
# %%
ofp = 'test.html'
of = open(ofp, 'w', encoding='utf-8', errors='xmlcharrefreplace')
of.write(bodyhtml)
of.close()
# %%
md = markdown.Markdown(extensions=['toc', 'tables', 'fenced_code'])  # need fenced_code here too
# %%
bodytoc = md.convert(head_body['body'])
# bodytoc
bodyhtml == bodytoc
def main(input):
    template = Frontmatter.read_file(input)
    for solution in problemSpace(template=template):
        print(solution)
import jinja2
from frontmatter import Frontmatter
from markdown import markdown


def govuk_markdown(text):
    # add GOV.UK Design System classes to the rendered HTML
    text = markdown(text)
    text = text.replace("<h1>", """<h1 class="govuk-heading-xl">""")
    text = text.replace("<h2>", """<h2 class="govuk-heading-l">""")
    text = text.replace("<h3>", """<h3 class="govuk-heading-m">""")
    text = text.replace("<p>", """<p class="govuk-body">""")
    return text


multi_loader = jinja2.ChoiceLoader([
    jinja2.FileSystemLoader(searchpath="./templates"),
    jinja2.PrefixLoader({
        'govuk-jinja-components':
            jinja2.PackageLoader('govuk_jinja_components')
    })
])
env = jinja2.Environment(loader=multi_loader)

post = Frontmatter.read_file("content/guidance.md")
title = post["attributes"]["title"]
content = govuk_markdown(post["body"])

with open("docs/index.html", "w") as f:
    f.write(
        env.get_template("guidance.html").render(title=title, content=content))
import os
import random
from datetime import datetime

from frontmatter import Frontmatter

post_id = []
CONTENT_DIR = "docs/blog/"
FILENAME = "new.md"
CONTENT = ""

# save all existing ids
for filename in os.listdir(CONTENT_DIR):
    if filename.endswith('.md'):
        post = Frontmatter.read_file(CONTENT_DIR + filename)
        post_id.append(post['attributes']['abbrlink'])

# generate a new id that isn't already taken
new_id = random.randint(10000, 99999)
while new_id in post_id:
    new_id = random.randint(10000, 99999)

# build the front matter for the new post
CONTENT = ("---\ntitle: \nabbrlink: " + str(new_id) + "\ndate: "
           + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n---\n\n")

# write the new front matter to file, with the new id
f = open(CONTENT_DIR + FILENAME, 'a+')
f.write(CONTENT)
f.close()
# finish
from frontmatter import Frontmatter

post = Frontmatter.read_file('./tests/testfile.md')

print("\n[attributes]")
print(post['attributes'])

print("\n[body]")
if post['body'] == "":
    print("ERROR: body not captured properly.")
    print("TEST FAILED.")
    exit(1)
print(post['body'])

print("\n[frontmatter]")
if post['frontmatter'] == "":
    print("ERROR: frontmatter not captured properly.")
    print("TEST FAILED.")
    exit(1)
print(post["frontmatter"])

print("TEST SUCCEEDED.")
exit(0)
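# For reference, Frontmatter.read_file expects a file that opens with a YAML
# block between `---` fences. The actual contents of ./tests/testfile.md are
# not shown here, so this fixture is illustrative only:
sample = """\
---
title: Test post
tags:
  - demo
---
Body text goes here.
"""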