def fetch(targets, strip_yaml=False):
    """Fetch file(s) from url(s), concatenate, and save locally.

    :param targets: iterable of (url, local_path) pairs
    :param strip_yaml: if True, strip YAML front matter from each saved
        file afterwards (a ``.bak`` copy of the original is kept)
    :raises Exception: if any download returns a non-200 status code
    """
    logger = logging.getLogger(sys._getframe().f_code.co_name)
    for target in targets:
        logger.debug('requesting {0}'.format(target[0]))
        r = requests.get(target[0], stream=True)
        if r.status_code == 200:
            # appending ensures we can aggregate, e.g., .gitignore content
            with open('{0}'.format(target[1]), 'ab') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
        else:
            # BUG FIX: previously formatted the literal list ``[0]`` instead
            # of the url; also removed the unreachable sys.exit(1) that
            # followed the raise
            raise Exception('fetch of {0} failed with status code {1}'
                            ''.format(target[0], r.status_code))
        logger.debug('successfully saved {0} as {1}'.format(*target))
    if strip_yaml:
        for target in targets:
            fp = target[1]
            post = frontmatter.load(fp)
            # keep a backup of the original before rewriting in place
            shutil.copy(fp, os.path.splitext(fp)[0] + '.bak')
            with open(fp, 'w') as f:
                f.write(post.content)
            logger.debug('removed yaml front matter from {0}'.format(fp))
def test_empty_frontmatter(self):
    "Frontmatter, but no metadata"
    expected = six.text_type("I have frontmatter but no metadata.")
    post = frontmatter.load('tests/empty-frontmatter.txt')
    self.assertEqual(post.metadata, {})
    self.assertEqual(post.content, expected)
def test_frontmatter():
    """Test the lib by loading a markdown file with frontmatter."""
    post = frontmatter.load('sample-md/helloworld.md')
    # identity check on the boolean is deliberate
    assert post['draft'] is True
    assert post['title'] == 'Hello World'
    assert post['template'] == 'base.html'
def get_posts():
    """Walk CONTENT for markdown files and return them as frontmatter posts.

    Each post is annotated with its file path/name, permalink flag,
    template, body content, output path and slug.
    """
    md_files = [
        os.path.join(path, name)
        for path, subdirs, files in os.walk(CONTENT)
        for name in files
        if name.endswith(EXTENSIONS)
    ]
    posts = []
    for md_file in md_files:  # renamed: ``file`` shadowed the builtin
        post = frontmatter.load(md_file)
        post['file_path'] = md_file
        post['file_name'] = os.path.basename(md_file)
        post['permalink'] = post.get('permalink', True)
        post['template'] = post.get('template', DEFAULT_TEMPLATE)
        post['content'] = post.content
        noext_name = os.path.splitext(post['file_name'])[0]
        if post['permalink']:
            # pretty url: <slug>/index.html
            slug = slugify(noext_name)
            post['output_path'] = os.path.join(slug, 'index.html')
            post['slug'] = slug
        else:
            post['output_path'] = noext_name + '.html'
            post['slug'] = post['output_path']
        posts.append(post)
    return posts
def loadPost(filename):
    """Load a frontmatter post from ``filename``; empty list if not a file."""
    if not os.path.isfile(filename):
        print("Not a valid file")
        return []
    with open(filename) as fh:
        return frontmatter.load(fh)
def __init__(self, mdFile, contentDir):
    """Build a page object from a markdown file plus sibling asset directories.

    mdFile: markdown filename, relative to contentDir.
    contentDir: directory holding the markdown file and asset sub-dirs.
    """
    print("creating new page :" + mdFile + " - " + contentDir)
    md_file = join(contentDir, mdFile)
    if isfile(md_file):
        post = frontmatter.load(md_file)
        # render the markdown body to HTML, with fenced code block support
        self.content = markdown2.markdown(post.content, extras=['fenced-code-blocks'])
        self.id = basename(contentDir)
        self.slug = self.id + ".html"
        # creation date is taken from the file's mtime, not the frontmatter
        self.createdDateTxt = strftime("%d %b %Y", gmtime(getmtime(md_file)))
        self.createdDate = getmtime(md_file)
        # promote every frontmatter key to an instance attribute
        for key in post.keys():
            setattr(self, key, post[key])
    self.imageFiles = []
    if isdir(contentDir):
        # each sub-directory becomes an attribute listing its non-markdown files
        for sub_dir in filter(lambda d: isdir(join(contentDir, d)), listdir(contentDir)):
            setattr(self, sub_dir, [
                f for f in listdir(join(contentDir, sub_dir))
                if isfile(join(contentDir, sub_dir, f)) and splitext(f)[1] != ".md"
            ])
            # collect jpg/png assets for the image list
            self.imageFiles.extend([
                join(contentDir, sub_dir, f)
                for f in getattr(self, sub_dir)
                if splitext(f)[1].lower() in (".jpg", ".png")
            ])
def main():
    """Check (and optionally enforce) sequential post ordering per category.

    For every category, collect the ``order`` values of posts whose
    ``display_as`` matches, then verify they form a consecutive sequence.
    Relies on module globals: categories, paths, enforce, file_path.
    """
    # 1. collect the current order of posts
    # 2. sort and check if sorted order is sequential
    for category in categories:
        postFamily = []
        # get all posts with frontmatter in md format
        for md_path in paths:
            # FIX: skip notebook checkpoints *before* paying the cost of
            # parsing the file (previously loaded first, skipped after)
            if ".ipynb_checkpoints" in str(md_path):
                continue
            post = frontmatter.load(str(md_path))
            metadata = get_meta(post)
            if len(post.metadata.keys()) > 0:
                if "display_as" in metadata:
                    if metadata['display_as'] == category:
                        postFamily.append({'path': str(md_path),
                                           'order': metadata['order']})
        sortedPostFamily = sorted(postFamily, key=lambda i: i['order'])
        order = [p['order'] for p in sortedPostFamily]
        print(order)
        if not checkConsecutive(order):
            if enforce is True:
                print('Order Check Did Not Pass! ENFORCING CORRECT ORDER for {}'.format(category))
                enforceOrder(sortedPostFamily)
            else:
                arg = file_path if file_path != "build/html" else "python"
                raise Exception("Order Check Failed in '{}' display_as! Run 'python check-or-enforce-order.py {} enforce' to resolve!".format(category, arg))
        print("Order Check Passed for {} display_as in {}!".format(category, file_path))
        order = []
def check_one_file_frontmatter(file_path): """ given the path to a markdown file, make sure that the frontmatter includes the required metadata """ # print(file_path) logger = logging.getLogger(__name__) name = file_path.name if not name.endswith(".md"): return front = frontmatter.load(file_path) required = ["title"] allowed = [ "_db_id", "content_type", "pre", "weight", "ready", "date", "disableToc", "todo", "ncit_unit_standard", "ncit_specific_outcomes", "nqf", "unit_standards", "prerequisites", "tags", "story_points", "available_flavours", "topic_needs_review" # "from_repo", ] if "_db_id" in front: IdChaecker.check(front["_db_id"], front["title"], file_path) if str(file_path).startswith("content/projects"): required.append("submission_type") hard_prereq = front.get("prerequisites", {}).get("hard", []) if "submission_type" in front: assert ( front["submission_type"] in allowed_submission_types ), f"{file_path} invalid submission type: {front['submission_type']}" if front["submission_type"] == "continue_repo": required.append("from_repo") # if front["title"] == "Calandar widget": # breakpoint() assert ( front["from_repo"] in hard_prereq ), f"{file_path}: expected hard prepreq: '{front['from_repo']}'\nonly found: {hard_prereq}" if front["submission_type"] != "nosubmit": required.append("available_flavours") if front["submission_type"] == "repo": allowed.append("template_repo") for key in front.keys(): if key not in required + allowed: logger.warning(f"{file_path} has unrecognized frontmatter: {key}") continue for key in required: if key not in front.keys(): logger.error(f"{file_path} has MISSING frontmatter: {key}") continue if "available_flavours" in front and front.get( "submission_type") != "nosubmit": for option in front["available_flavours"]: assert option in flat_options, f"{option} not in {flat_options}"
def get_full(self):
    """Load this object's source file and populate content, title and data."""
    with open(self.file_, encoding='utf-8') as source:
        post = load(source)
    self.content = post.content
    self.title = post['title']
    self.data = Data(post)
def add_agent_information(search_path, file_component):
    """
    Will check to see if the file needs to be processed before updating the
    values.  Will then populate the new agent information with what should be
    used in the future.

    :param search_path:
    :param file_component:
    :return:
    """
    if file_component.suffix != '.md':
        return
    entry = frontmatter.load(file_component)
    write_file = False
    # Check to see if the agent information has been defined in the log file
    if fm_keys.AGENT_DEFINITION not in entry.metadata:
        entry.metadata[fm_keys.AGENT_DEFINITION] = dict(
            DEFAULT_AGENT_DEFINTION)
        write_file = True
    else:
        agent_definition = entry.metadata[fm_keys.AGENT_DEFINITION]
        try:
            for key in (fm_keys.AGENT_NAME, fm_keys.AGENT_VERSION,
                        fm_keys.FILE_VERSION):
                # BUG FIX: call first, then ``or`` -- the previous
                # ``write_file or _check_set_value(...)`` short-circuited
                # once one key was set, skipping the remaining keys
                write_file = _check_set_value(agent_definition, key) or write_file
        except (AttributeError, TypeError):
            # BUG FIX: ``except (AttributeError | TypeError)`` is not valid
            # exception matching (pre-3.14); a tuple is required.
            # Copy the default so later mutation cannot corrupt the shared dict.
            entry.metadata[fm_keys.AGENT_DEFINITION] = dict(DEFAULT_AGENT_DEFINTION)
            write_file = True
    if write_file:
        with open(file_component, "w") as output_file:
            output_file.write(frontmatter.dumps(entry))
def test_with_markdown_content(self):
    "Parse frontmatter and only the frontmatter"
    post = frontmatter.load('tests/hello-markdown.markdown')
    expected = {'author': 'bob', 'something': 'else', 'test': 'tester'}
    for key, value in expected.items():
        self.assertEqual(post[key], value)
def main(args):
    """Publish a frontmatter-markdown file to Confluence and update Jira.

    :param args: command line arguments, parsed into module globals
    :returns: 0 on success, 1 on failure (missing front matter,
        Confluence error, or Jira error)
    """
    parseArgs(args)
    post = frontmatter.load(g_fileName)
    if post.metadata is None or len(post.metadata) == 0:
        logging.critical('File must start with front matter')
        return 1
    if g_add_rtl:
        # wrap content for right-to-left rendering
        post.content = '<div style="direction: rtl;" markdown="1">' + post.content + '</div>'
    if 'confluence_macros' in post.metadata:
        if post.metadata['confluence_macros'] is not None:
            post.content = post.content + post.metadata['confluence_macros']
    # FIX: pass extensions via keyword -- the positional form was
    # deprecated and later removed from the markdown package
    html = markdown.markdown(post.content, extensions=['markdown.extensions.extra'])
    try:
        url = confluence(html, post.metadata)
    except Exception as inst:
        logging.critical('Confluence error')
        logging.critical(inst)
        return 1
    try:
        jira(url, post.metadata)
    except Exception as inst:
        logging.critical('Jira error')
        logging.critical(inst)
        return 1
    return 0
def parse_md(self, ssg_engine, md_src):
    """Parse an event markdown file: metadata, content, and output paths.

    :param ssg_engine: site generator providing filenames, output dir and
        the default render context
    :param md_src: path to the event's markdown source
    :raises Exception: if ``md_src`` does not exist
    """
    # FIX: parenthesized print works under both Python 2 and 3
    print('processing %s' % md_src)
    if not exists(md_src):
        raise Exception('%s does not exist' % md_src)
    post = frontmatter.load(md_src)
    front = post.metadata
    content = markdown.markdown(post.content,
                                extensions=['markdown.extensions.tables'])
    self._parse_metadata(front)
    output_fn = self._start_date_str() + '-' + ssg_engine.sanitize_filename(self.name) + '.html'
    relative_path = join('events', output_fn)
    if self.url or self.has_tag(Tags.NOPAGE):
        # external url or explicitly page-less: nothing to write out
        self.output_path = None
    else:
        self.url = relative_path
        self.output_path = join(ssg_engine.output_dir, relative_path)
    self.render_cntx = ssg_engine.default_context()
    self.render_cntx['title'] = self.name
    self.render_cntx['metadata'] = self.to_dict()
    self.render_cntx['content'] = content
def run(path, kwargs):
    """Deploy ``path`` -- a git url, a directory, or a single file -- via a
    temporary workspace that is removed regardless of outcome.
    """
    # Create a temporary working directory
    tmpdir = None
    try:
        tmpdir = tempfile.mkdtemp()
        g = GitURL(path)
        if g.valid:
            # Git repo: clone over ssh into the workspace, then deploy
            url = g.to_ssh()
            dest = os.path.join(tmpdir, g.repo)
            clip.echo('Cloning git repo "{}" to "{}"...'.format(url, dest))
            git.Repo.clone_from(url, dest)
            deploy_dir(dest, kwargs)
        elif os.path.isdir(path):
            # Directory: copy wholesale, then deploy
            dest = os.path.join(tmpdir, os.path.basename(path))
            clip.echo('Copying directory "{}" to "{}"...'.format(path, dest))
            shutil.copytree(path, dest)
            deploy_dir(dest, kwargs)
        else:
            # File: strip frontmatter; deploy body with metadata passed separately
            dest = os.path.join(tmpdir, os.path.basename(path))
            parsed = frontmatter.load(path)
            clip.echo('Copying file "{}" to "{}"...'.format(path, dest))
            with open(dest, 'w') as f:
                f.write(parsed.content)
            deploy_file(dest, kwargs, parsed.metadata)
    finally:
        # Clean up our temporary working directory
        if tmpdir:
            utils.delete_resource(tmpdir)
def get(self, filename, reset=False):
    """
    Get a single processed file. Uses a cached version
    if `run` has already been called, unless `reset` is True.
    """
    if filename in self.files and not reset:
        return self.files[filename]
    # load and process just this one file
    batch = {}
    path = os.path.join(self.source, filename)
    batch[filename] = frontmatter.load(
        path, filename=filename, slug=os.path.splitext(filename)[0])
    # run every middleware hook over the freshly loaded file
    for func in self.middleware:
        func(batch, self)
    # cache the processed post
    self.files.update(batch)
    # return just the post
    try:
        return batch[filename]
    except KeyError:
        raise PostNotFound('{0} not found'.format(filename))
def get_items(collections=[], path="", structured=True, json_format=False):
    """
    Gets all dataobjs.

    Parameters:

    - **collections** - filter dataobj by type, eg. bookmark / note
    - **path** - filter by path
    - **structured: if set to True, will return a Directory object, otherwise data will just be returned as a list of dataobjs
    - **json_format**: boolean value used internally to pre-process dataobjs to send back a json response.
    """
    # NOTE(review): mutable default ``collections=[]`` -- safe only because
    # it is never mutated in this function
    datacont = Directory(path or "root") if structured else []
    root_dir = get_data_dir() / path.strip("/")
    if not root_dir.exists():
        raise FileNotFoundError
    for filepath in root_dir.rglob("*"):
        if structured:
            # tree mode: mirror the directory hierarchy in Directory objects
            paths = filepath.relative_to(root_dir)
            current_dir = datacont
            # iterate through parent directories
            for segment in paths.parts[:-1]:
                # directory has not been saved in tree yet
                if segment not in current_dir.child_dirs:
                    current_dir.child_dirs[segment] = Directory(segment)
                current_dir = current_dir.child_dirs[segment]
            if paths.parts[-1].endswith(".md"):
                data = frontmatter.load(filepath)
                current_dir.child_files.append(data)
        else:
            # flat mode: collect matching markdown dataobjs into a list
            if filepath.parts[-1].endswith(".md"):
                data = frontmatter.load(filepath)
                data["fullpath"] = str(filepath.parent.relative_to(root_dir))
                # empty ``collections`` means "no type filter"
                if len(collections) == 0 or \
                        any([collection == data["type"] for collection in collections]):
                    if json_format:
                        dict_dataobj = data.__dict__
                        # remove unnecessary yaml handler
                        dict_dataobj.pop("handler")
                        datacont.append(dict_dataobj)
                    else:
                        datacont.append(data)
    return datacont
def render_page(file_):
    """Render one yasifipo page file to HTML through its configured template."""
    with open(file_, encoding='utf-8') as data:
        # NOTE(review): ``yaml`` here is a frontmatter Post, not the yaml module
        yaml = load(data)
        page = Page('page', yaml)
        page.lang = set_lang(yaml)
        page.app = app
        page.langs = get_langs_from_ref(yaml, page.lang)
        # tag display: page-level setting overrides the site-wide default
        if 'display_tags' in yaml.keys() and yaml['display_tags'] == False:
            page.display['tags'] = False
        else:
            if 'display_tags' in app.yasifipo["config"]["default"].keys() and app.yasifipo["config"]["default"]["display_tags"] == False:
                page.display['tags'] = False
            else:
                page.tags_display = page.get_tags_display(yaml)
        # breadcrumb ("cucumber") trail, unless explicitly disabled
        if 'display_cucumber' not in yaml.keys() or ('display_cucumber' in yaml.keys() and yaml['display_cucumber'] != False):
            page.display['cucumber'] = True
            if 'parent' in yaml.keys() and 'ref' in yaml.keys():
                page.cucumber = get_page_cucumber(app.yasifipo["refs"][yaml['ref']][page.lang]['file'], page.lang)
            else:
                page.cucumber = []
        else:
            page.cucumber = []
        get_lists(page, yaml, request)
        # template: page override or site default
        if 'layout' in yaml.keys():
            layout = 'page/' + yaml['layout']
        else:
            layout = app.yasifipo["config"]["layout_page"]
        page.title = yaml['title']
        env = Environment()
        # register the project's jinja filters
        env.filters['yasifipo'] = yasifipo
        env.filters['youtube'] = youtube
        env.filters['onlydate'] = onlydate
        env.filters['include'] = include
        env.filters['static'] = static
        # pre-filter the body, render it through jinja, then convert to HTML
        page.content = Markup(markdown(env.from_string(pre_filter({'file': file_}, yaml.content)).render(), extensions=app.yasifipo["markdown_process"]))
        page.get_menus(yaml)
        for plugin in app.plugins.values():
            plugin.before_render(page, file_)
        page.get_generated_time()
        return render_template(layout, site=app.yasifipo["sitedata"], i18n=app.yasifipo['i18n'], page=page)
def on_any_event(event):
    """Watchdog hook: when a file finishes being copied, announce it
    (via Telegram when configured, otherwise stdout).  Markdown files get
    a duration and song-count summary pulled from their frontmatter.
    """
    if event.is_directory:
        return None
    elif event.event_type == 'created':
        # Take any action here when a file is first created.
        if pathlib.Path(event.src_path).suffix.find(".sw") == -1:
            # as file copy is not completed when the 'created' event is received,
            # loop until the file size is not increased and any other process
            # access it
            file_done = False
            file_size = -1
            try:
                t_file_size = pathlib.Path(event.src_path).stat().st_size
            except OSError:
                print(f"Abnormal : fail to get file size {event.src_path}")
                return
            # FIX: re-stat inside the loop -- the size was previously read
            # only once, so growth could never actually be detected
            while file_size != t_file_size:
                file_size = t_file_size
                time.sleep(1)
                try:
                    t_file_size = pathlib.Path(event.src_path).stat().st_size
                except OSError:
                    print(f"Abnormal : fail to get file size {event.src_path}")
                    return
            while not file_done:
                try:
                    # renaming onto itself fails while another process holds the file
                    os.rename(event.src_path, event.src_path)
                    file_done = True
                except OSError:
                    break
            if notification.lower() == "telegram":
                import telegram_send
                tg = telegram_send.MyTelegram()
            if pathlib.Path(event.src_path).suffix == ".md":
                with open(event.src_path) as f:
                    post = frontmatter.load(f)
                # FIX: iterate lines, not characters -- ``for line in
                # post.content`` walked the string one character at a time
                songs = [line for line in post.content.splitlines()
                         if line.startswith('*')]
                try:
                    duration = post.metadata["podcast"]["duration"]
                except (KeyError, TypeError):
                    duration = 0
                if notification.lower() == "telegram":
                    tg.send_msg(f"(unknown)\n{duration}s {len(songs)} songs\n\n{post.content}")
                else:
                    print(f"(unknown) {duration}s {len(songs)} songs")
            else:
                print(f"(unknown)\n({file_size} Bytes)")
                if notification.lower() == "telegram":
                    tg.send_msg(f"(unknown)\n({file_size} Bytes)")
                else:
                    print(f"(unknown)\n({file_size} Bytes)")
def test_no_frontmatter(self):
    "This is not a zen exercise."
    loaded = frontmatter.load('tests/no-frontmatter.txt')
    with codecs.open('tests/no-frontmatter.txt', 'r', 'utf-8') as fh:
        raw = fh.read().strip()
    self.assertEqual(loaded.metadata, {})
    self.assertEqual(loaded.content, raw)
def get_final_post_front_matter(f):
    """Render the 'final_post_front_matter' mapping as a YAML front matter block."""
    post = frontmatter.load(f)
    mapping = post.get('final_post_front_matter')
    lines = ["---"]
    for key, value in mapping.items():
        lines.append("{}: {}".format(key, value))
    lines.append("---")
    return "\n".join(lines) + "\n\n"
def test_to_string(self):
    "Calling str(post) returns post.content"
    loaded = frontmatter.load("tests/yaml/hello-world.txt")
    # test unicode and bytes
    expected = "Well, hello there, world."
    self.assertEqual(str(loaded), expected)
    self.assertEqual(bytes(loaded), expected.encode("utf-8"))
def test_toml(self):
    "load toml frontmatter"
    if toml is None:
        return
    loaded = frontmatter.load("tests/hello-toml.markdown")
    expected = {"author": "bob", "something": "else", "test": "tester"}
    for key, value in expected.items():
        self.assertEqual(loaded[key], value)
def test_unicode_post(self):
    "Ensure unicode is parsed correctly"
    post = frontmatter.load('tests/chinese.txt')
    self.assertTrue(isinstance(post.content, six.text_type))
    # Hanzi content cannot round-trip through ascii
    self.assertRaises(UnicodeEncodeError, post.content.encode, 'ascii')
def _index_markdown(self, filepath):
    """Parse one markdown file and add it to the search index."""
    metadata = {"path": filepath}
    with open(filepath, "r", encoding="utf-8") as source:
        parsed = frontmatter.load(source)
    if "tags" in parsed:
        metadata["tags"] = parsed["tags"]
    # the file path doubles as the document id
    self.index_doc(parsed.content, filepath, metadata)
def find_location(name):
    """Return a Location whose frontmatter title matches ``name``."""
    for root, dirs, files in os.walk("_locations"):
        for entry in files:
            candidate = frontmatter.load(os.path.join(root, entry))
            print(candidate["title"])
            if candidate["title"] == name:
                return Location(candidate)
    raise Exception("No location found for {}".format(name))
def test_toml(self):
    "load toml frontmatter"
    if toml is None:
        return
    loaded = frontmatter.load('tests/hello-toml.markdown')
    expected = {'author': 'bob', 'something': 'else', 'test': 'tester'}
    for key, value in expected.items():
        self.assertEqual(loaded[key], value)
def test_dumping_with_custom_delimiters(self):
    "dump with custom delimiters"
    loaded = frontmatter.load('tests/hello-world.markdown')
    rendered = frontmatter.dumps(loaded,
                                 start_delimiter='+++',
                                 end_delimiter='+++')
    self.assertTrue('+++' in rendered)
def get_entries():
    """Load every markdown entry and attach its public URL."""
    entries = []
    for md_file in Path(ENTRIES_DIR).glob("*.md"):
        entry = frontmatter.load(md_file)
        entry['url'] = ENTRY_PATH + md_file.stem
        entries.append(entry)
    return entries
def test_to_string(self):
    "Calling str(post) returns post.content"
    loaded = frontmatter.load('tests/hello-world.markdown')
    # test unicode and bytes
    expected = "Well, hello there, world."
    self.assertEqual(six.text_type(loaded), expected)
    self.assertEqual(six.binary_type(loaded), expected.encode('utf-8'))
def test_inner_template(self):
    "Ensure post content is rendered as its own template"
    expected = frontmatter.load('tests/markup/template.md')
    # render the post body through jinja with its own metadata as context
    expected.content = self.env.from_string(expected.content).render(expected.metadata)
    actual = self.stack.get('template.md')
    self.assertEqual(actual.content, expected.content)
def test_no_frontmatter(self):
    "This is not a zen exercise."
    loaded = frontmatter.load("tests/empty/no-frontmatter.txt")
    with codecs.open("tests/empty/no-frontmatter.txt", "r", "utf-8") as fh:
        raw = fh.read().strip()
    self.assertEqual(loaded.metadata, {})
    self.assertEqual(loaded.content, raw)
def test_no_handler(self):
    "default to YAMLHandler when no handler is attached"
    post = frontmatter.load('tests/hello-world.markdown')
    del post.handler
    dumped = frontmatter.dumps(post)
    detected = frontmatter.detect_format(dumped, frontmatter.handlers)
    self.assertIsInstance(detected, YAMLHandler)
def get_current_human_version(internal_version):
    """
    Returns version from frontmatter (used for tagging and displaying)
    """
    md_path = os.path.join(RELEASED_DIR, "%s.md" % internal_version)
    with open(md_path, "rb") as md_file:
        released = frontmatter.load(md_file)
    return released["version"].strip()
def test_dumping_with_custom_delimiters(self):
    "dump with custom delimiters"
    loaded = frontmatter.load("tests/hello-world.markdown")
    rendered = frontmatter.dumps(loaded,
                                 start_delimiter="+++",
                                 end_delimiter="+++")
    self.assertTrue("+++" in rendered)
def _load_post(filename, article, meta_types):
    """Read a frontmatter file into ``article``, localizing naive datetimes to UTC."""
    post = frontmatter.load(filename)
    for key in post.keys():
        value = post[key]
        if isinstance(value, datetime) and not value.tzinfo:
            # no timezone means UTC
            post[key] = pytz.utc.localize(value)
    article.content = post.content
    _set_attributes(article.info, post.metadata, meta_types)
def _fix_dates_for_article(repofile):
    """Stamp create/modified times on an article's frontmatter and save it."""
    abs_article_file = os.path.join(current_app.yawt_root_dir, repofile)
    post = frontmatter.load(abs_article_file)
    now = _now()
    if 'create_time' not in post.metadata:
        # first save: record the creation time too
        post['create_time'] = now
    post['modified_time'] = now
    save_file(abs_article_file,
              frontmatter.dumps(post, Dumper=ExplicitDumper))
def load_md(fn):
    """
    Load a yaml text blob from a markdown file and parse the blob.
    Returns a tuple (yaml_obj, markdown_text)
    """
    parsed = frontmatter.load(fn)
    return parsed.metadata, parsed.content
def get_item(dataobj_id):
    """Returns a Post object with the given dataobjs' attributes"""
    entry = get_by_id(dataobj_id)  # renamed: ``file`` shadowed the builtin
    if not entry:
        return None
    data = frontmatter.load(entry)
    data["fullpath"] = str(entry)
    return data
def merge_citations(fdir):
    """Collect the 'cite' frontmatter value of every non-index .md in fdir."""
    directory = Path(fdir)
    posts = [
        frontmatter.load(md)
        for md in directory.glob("*.md")
        if md.name != "index.md"
    ]
    return [post.get("cite", None) for post in posts]
def process(options, dirparam):
    """Rebuild the image list in a logo directory's index.md front matter.

    Scans ``dirparam`` for usable image files (svg named after the
    directory), then rewrites index.md with the image list and any
    missing logohandle/title/sort keys.
    """
    logodir = os.path.abspath(dirparam)
    logohandle = os.path.basename(logodir)
    #print("INFO: processing %s (%s)" % (logohandle, logodir))
    files = [f for f in os.listdir(logodir)
             if os.path.isfile(os.path.join(logodir, f))]
    images = []
    skipped = 0
    for f in files:
        if not (f.endswith(".svg") or f.endswith(".png")):
            # vector/print source formats are intentionally excluded
            if f.endswith((".ai", ".pdf", ".eps")):
                print("INFO: skipping " + f)
                skipped += 1
            continue
        if f.endswith("_src.svg") or f.endswith(".png"):
            print("INFO: skipping " + f)
            skipped += 1
            continue
        if not f.startswith(logohandle + '-'):
            print("INFO: skipping " + f)
            skipped += 1
            continue
        images.append(f)
    if len(images) == 0:
        print("WARNING: no images for %s" % logohandle)
        return
    indexfn = os.path.join(logodir, "index.md")
    if not os.path.exists(indexfn):
        print("WARNING: no index.md for %s" % logohandle)
        return
        #indexmd = frontmatter.loads("---\n---\n")
    indexmd = frontmatter.load(indexfn)
    indexmd['images'] = images
    #indexmd['skipped'] = skipped
    # fill in any missing identification keys
    if "logohandle" not in indexmd.keys():
        indexmd["logohandle"] = logohandle
    if "title" not in indexmd.keys():
        indexmd["title"] = logohandle.capitalize()
    if "sort" not in indexmd.keys():
        indexmd["sort"] = indexmd["title"].lower()
    # FIX: write through a context manager so the handle is always closed
    with open(indexfn, 'w') as fh:
        fh.write(frontmatter.dumps(indexmd))
        fh.write('\n')
def test_to_dict(self):
    "Dump a post as a dict, for serializing"
    loaded = frontmatter.load('tests/network-diagrams.markdown')
    as_dict = loaded.to_dict()
    for key, value in loaded.metadata.items():
        self.assertEqual(as_dict[key], value)
    self.assertEqual(as_dict['content'], loaded.content)
def get_team_data(self, team):
    """Locate and load the frontmatter file for a team number."""
    number = int(team)
    # files are bucketed by thousands dir, then hundreds sub-dir
    thousands = 'frc%04d' % (int(number / 1000) * 1000)
    hundreds = '%03d' % (int(number / 100) * 100)
    team_path = abspath(join(dirname(__file__), '..', thousands,
                             '_frc', hundreds, '%s.md' % team))
    print("Path:", team_path)
    return frontmatter.load(team_path), team_path
def posts(self):
    """Yield every post under post_dir whose extension is allowed."""
    for root, _dirs, names in os.walk(self.post_dir):
        for name in names:
            _stem, ext = os.path.splitext(name)
            if self.post_exts is not None and ext not in self.post_exts:
                continue
            loaded = frontmatter.load(os.path.join(root, name))
            loaded.filename = name
            yield loaded
def create_post(self, filename, data):
    """Create a new post file, seeding metadata/content from ``data``."""
    with self.repository.open(filename, 'w+') as fp:
        post = frontmatter.load(fp)
        post.metadata = data.get('metadata', post.metadata)
        # TODO: parse from media
        post.content = data.get('content', post.content)
        fp.write(frontmatter.dumps(post))
    return filename
def get_last_block_hash(block_path):
    """Return the hash of the chain tip: the block no other block links to.

    :param block_path: directory containing one file per block
    :returns: the tip's hash, or None when the directory holds no blocks
    :raises IndexError: if every block is referenced (matches prior behavior)
    """
    blocks = os.listdir(block_path)
    if not blocks:
        return None
    # collect every hash referenced as a predecessor
    linked = set()
    for block_hash in blocks:
        block = frontmatter.load(f'{block_path}/{block_hash}')
        if 'prev' in block:
            linked.add(block['prev'])
    # FIX: set membership keeps this O(n) instead of O(n^2) list scans
    return [i for i in blocks if i not in linked][0]
def test_get_file(self):
    "Test getting a single processed file"
    expected = frontmatter.load('tests/markup/ebola.md',
                                filename='ebola.md', slug='ebola')
    # sanitize, then render markdown -- same pipeline the stack applies
    expected.content = markdown(bleach.clean(expected.content, strip=True))
    actual = self.stack.get('ebola.md')
    self.assertEqual(expected.metadata, actual.metadata)
    self.assertEqual(expected.content, actual.content)
def updateDBMarkdown(directory, function):
    """Load every .md file in ``directory``, sort newest-first, call ``function``."""
    base = os.path.join(os.getcwd(), directory)
    records = []
    for name in os.listdir(base):
        if name.endswith(".md"):
            with open(os.path.join(base, name)) as fh:
                records.append(frontmatter.load(fh))
    # newest first, by the human-readable date in the frontmatter
    records.sort(
        key=lambda rec: datetime.strptime(rec["date"], "%A %B %d, %Y"),
        reverse=True)
    for record in records:
        print(record["date"])
        function(record["title"], record["subtitle"], record.content, record["date"])
def sanity_check(self, filename, handler_type):
    "Ensure we can load -> dump -> load"
    original = frontmatter.load(filename)
    self.assertIsInstance(original.handler, handler_type)
    # dump and reload
    roundtrip = frontmatter.loads(frontmatter.dumps(original))
    self.assertEqual(original.metadata, roundtrip.metadata)
    self.assertEqual(original.content, roundtrip.content)
    self.assertEqual(original.handler, roundtrip.handler)
def test_with_dots(self):
    "Parse frontmatter and text with ... as metadata end delimiter"
    loaded = frontmatter.load('tests/dots.markdown')
    expected_meta = {'title': 'Hello, world!', 'layout': 'post'}
    for key, value in expected_meta.items():
        self.assertEqual(loaded[key], value)
    # test unicode and bytes
    expected_text = "Well, hello there, world."
    self.assertEqual(six.text_type(loaded), expected_text)
    self.assertEqual(six.binary_type(loaded), expected_text.encode('utf-8'))
def edit_post(self, filename, data):
    """Rewrite an existing post in place with new metadata and/or content."""
    # Replace post's data in file
    with self.repository.open(filename, 'r+') as fp:
        post = frontmatter.load(fp)
        post.metadata = data.get('metadata', post.metadata)
        post.content = data.get('content', post.content)
        # rewind and drop the old bytes before writing the new document
        fp.seek(0)
        fp.truncate()
        fp.write(frontmatter.dumps(post))
    return filename
def main(input_path, base_path):
    """Render a frontmatter page through its Jinja template and print the HTML."""
    env.loader = FileSystemLoader(base_path)
    with open(input_path) as input_file:
        page = frontmatter.load(input_file)
    # page may name its own template; fall back to the site default
    template = env.get_template(page.get('template', 'base.html'))
    rendered = template.render(content=page.content, title=page.get('title'))
    print(rendered)
def _add_tags_for_article(repofile, searcher):
    """Prompt for tags on an untagged article, defaulting to extracted key terms."""
    abs_article_file = os.path.join(current_app.yawt_root_dir, repofile)
    post = frontmatter.load(abs_article_file)
    if 'tags' not in post.metadata:
        # suggest the top three key terms from the article body
        keywords = [kw for kw, _ in searcher.key_terms_from_text(
            "content", post.content, numterms=3)]
        keyword_str = ",".join(keywords)
        usertags = input('Enter tags (default '+keyword_str+'): ')
        post['tags'] = usertags or keyword_str
        save_file(abs_article_file, frontmatter.dumps(post))
def test_dump_to_file(self):
    "dump post to filename"
    loaded = frontmatter.load('tests/hello-world.markdown')
    scratch = tempfile.mkdtemp()
    target = os.path.join(scratch, 'hello.md')
    frontmatter.dump(loaded, target)
    with open(target) as fh:
        written = fh.read()
    self.assertEqual(written, frontmatter.dumps(loaded))
    # cleanup
    shutil.rmtree(scratch)
def update_layout_in_dir(directory, layout):
    """
    Update "layout" key in frontmatter of all markdown files in a directory
    """
    pattern = directory + '/**/*.md'
    # Process each markdown file in turn
    for markdown_file in glob.iglob(pattern, recursive=True):
        print("updating " + markdown_file)
        document = frontmatter.load(markdown_file)   # read file
        document['layout'] = layout                  # update layout key
        frontmatter.dump(document, markdown_file)    # write back in place