def create_yaml(api_key, talks_dir, speakers_dir):
    """Fetch PaperCall submissions and write one Markdown file per talk and per speaker.

    :param api_key: PaperCall API token interpolated into the request URL.
    :param talks_dir: directory that receives one ``<slug>.md`` per talk.
    :param speakers_dir: directory that receives one ``<slug>.md`` per speaker.

    NOTE(review): `exists`, `makedirs`, `get`, `slugify`, `frontmatter` and
    PROPOSAL_STATES come from module-level imports/constants not visible here.
    """
    # Make sure both output directories exist before writing anything.
    if not exists(talks_dir):
        makedirs(talks_dir)
    if not exists(speakers_dir):
        makedirs(speakers_dir)
    for proposal_state in PROPOSAL_STATES:
        # One API call per submission state; per_page=1000 assumes the event
        # has fewer than 1000 proposals per state — TODO confirm.
        r = get(
            'https://www.papercall.io/api/v1/submissions?_token={0}&state={1}&per_page=1000'
            .format(
                api_key,
                proposal_state,
            ))
        # Speaker pages are accumulated per state and written after the
        # talks for that state.
        speakers = {}
        for proposal in r.json():
            talk_title_slug = slugify(proposal['talk']['title'])
            # The talk description is itself frontmatter-formatted Markdown.
            post = frontmatter.loads(proposal['talk']['description'])
            post['type'] = 'talk'
            post['title'] = proposal['talk']['title']
            post['level'] = proposal['talk']['audience_level']
            post['abstract'] = proposal['talk']['abstract']
            post['speakers'] = []
            speaker_name = proposal['profile']['name']
            # Heuristics for multi-speaker profiles: "A/B", "A and B", or
            # "A, B" — but a trailing ", MBA" is a credential, not a second
            # speaker.
            if '/' in speaker_name:
                speaker_name = speaker_name.split('/')
            elif ' and ' in speaker_name:
                speaker_name = speaker_name.split(' and ')
            elif ',' in speaker_name and speaker_name[-5:] != ', MBA':
                speaker_name = speaker_name.split(',')
            else:
                speaker_name = [speaker_name]
            for name in map(str.strip, speaker_name):
                speaker_slug = slugify(name)
                # First sighting of a speaker seeds their page from the
                # profile bio (also frontmatter-formatted).
                if speaker_slug not in speakers:
                    speakers[speaker_slug] = frontmatter.loads(
                        proposal['profile']['bio'])
                    speakers[speaker_slug]['name'] = name
                    speakers[speaker_slug]['talks'] = []
                post['speakers'].append(name)
                speakers[speaker_slug]['talks'].append(post['title'])
            talk_filename = '{}/{}.md'.format(talks_dir, talk_title_slug)
            # Binary mode: frontmatter.dump writes bytes to binary handles.
            with open(talk_filename, 'wb') as file_to_write:
                frontmatter.dump(post, file_to_write)
            print('saved {!r}'.format(talk_filename))
        for speaker_slug, speaker in speakers.items():
            speaker_filename = '{}/{}.md'.format(speakers_dir, speaker_slug)
            with open(speaker_filename, 'wb') as file_to_write:
                frontmatter.dump(speaker, file_to_write)
            print('saved {!r}'.format(speaker_filename))
def add_frontmatter(directory):
    """Recursively add an ``order`` key to the frontmatter of Markdown files.

    Files are processed in sorted-name order; a file that lacks an
    ``order``/``Order`` key gets ``STEP_SIZE * (position + 1)`` and is
    rewritten in place. Warns when a directory mixes pre-existing and
    newly-assigned orders.

    :param directory: an ``os.DirEntry``-like object with a ``path`` attribute.
    """
    # scan .md files in this directory and add 'order' to frontmatter if not present
    mdfiles = sorted(
        list(glob.iglob(directory.path + '/*.md', recursive=False)))
    extant_orders = 0
    new_orders = 0
    # enumerate() replaces the original O(n^2) mdfiles.index(mdfile) lookup.
    for position, mdfile in enumerate(mdfiles):
        print(mdfile)
        with open(mdfile, 'r') as f:
            post = frontmatter.load(f)
        if 'order' in post.metadata or 'Order' in post.metadata:
            extant_orders += 1
        else:
            post.metadata['order'] = STEP_SIZE + (position * STEP_SIZE)
            new_orders += 1
            # write to file only when we actually changed the metadata
            frontmatter.dump(post, mdfile)
    if new_orders and extant_orders:
        print(
            f'WARNING: {directory.path} contains a mix files with existing and new orders in their frontmatter'
        )
    # recursively scan subdirectories (skipping hidden ones and symlinks)
    subdirs = [
        e for e in scandir(directory)
        if e.name[0] != '.' and e.is_dir(follow_symlinks=False)
    ]
    for subdir in subdirs:
        add_frontmatter(subdir)
def set_shortlink(self, post, link):
    """Attach `link` to `post` under the configured shortlink key and persist it."""
    filename = getattr(post, 'filename', None)
    if not filename:
        # Nothing to write back to — bail out silently.
        return
    post[self.shortlink_key] = link
    target = os.path.join(self.post_dir, filename)
    with open(target, 'w') as handle:
        frontmatter.dump(post, handle)
def writepage(eventjson, p):
    """Serialize post `p` with YAML frontmatter and write it to the event's page path."""
    fpath = pagepath(eventjson)
    with open(fpath, mode="w+b") as f:
        # Render into an in-memory buffer first, then flush the bytes out.
        buffer = io.BytesIO()
        frontmatter.dump(p, buffer, handler=frontmatter.YAMLHandler())
        f.write(buffer.getvalue())
    print("Wrote page for event " + eventjson["name"] + " to " + fpath)
def process_markdown(context):
    """Render the Jinja2 template for `context` and write it as a frontmatter post.

    Runs every registered context extension first; when the context is marked
    ``setup_only`` no rendering happens and the markdown path is returned as-is.

    :param context: object exposing ``indicator``, ``area``, ``templates_dir``,
        ``folder``, ``markdown``, ``metadata`` and ``template_kwargs()``.
    :return: path of the markdown file that was (or would be) written.
    """
    # extend markdown context with custom values
    for f in contexts_register:
        f(context)
    if context.get('setup_only'):
        return context.markdown
    tmplfile = select_template(context.indicator,
                               context.area,
                               templatesdir=context.templates_dir)
    # Fix: the original `open(tmplfile).read()` leaked the file handle.
    with open(tmplfile) as template_source:
        tmpl = jinja2.Template(template_source.read())
    os.makedirs(context.folder, exist_ok=True)
    kwargs = context.template_kwargs()
    text = tmpl.render(**kwargs)
    md_file = context.markdown
    post = frontmatter.Post(text, **context.metadata)
    frontmatter.dump(post, md_file)
    return md_file
def parse_front_matter(self):
    """Generate a Jekyll-formatted specification page for every known spec.

    For each spec a frontmatter document is serialized to bytes and written to
    ``<md_files_path>/<spec_name>/specification.html`` alongside a README.
    Finally the Google Drive credentials file is removed.
    """
    self.bsc_spec_list = self.bsc_file_manager.get_specification_list()
    all_specs_formatted = self.__get_all_specs_dic()
    for formatted_spec in all_specs_formatted:
        temp_spec_post = self.__get_specification_post(formatted_spec)
        # Types and plain specs use different Jekyll layouts.
        if formatted_spec['spec_type'] == 'Type':
            temp_spec_post.metadata['layout'] = 'new_type_detail'
        else:
            temp_spec_post.metadata['layout'] = 'new_spec_detail'
        md_fm_bytes = BytesIO()
        # Force the version to a string so YAML does not emit it as a number.
        temp_spec_post.metadata['version'] = str(temp_spec_post.metadata['version'])
        frontmatter.dump(temp_spec_post, md_fm_bytes)
        spec_name = temp_spec_post.metadata['name']
        self.__create_spec_folder_struct(spec_name)
        spec_md_file_path = self.md_files_path + spec_name
        self.__write_README(spec_md_file_path, formatted_spec)
        # `with` closes the file; the original's explicit close() was redundant.
        with open(spec_md_file_path + '/specification.html', 'w') as outfile:
            temp_str = str(md_fm_bytes.getvalue(), 'utf-8')
            outfile.write(temp_str)
        print('%s MarkDown file generated.' % temp_spec_post.metadata['name'])
    os.remove(self.creds_file_path)
    # Typo fixes: "Goggle" -> "Google", "credit" -> "credentials".
    print('Google Drive connection closed and credentials file deleted.')
    print('All Jekyll formatted MarkDown files generated.')
def fix_post(data_file):
    """Insert a 'CMB'/'CMS' category (based on layout) into 'laporan' posts.

    The new category is spliced in just before the last existing category,
    then the file is rewritten with a trailing newline appended.
    Raises ValueError when the post has no categories at all.
    """
    handler = RuamelYAMLHandler()
    with open(data_file) as source:
        doc = frontmatter.load(source, handler=handler)
    if 'categories' not in doc.metadata:
        raise ValueError('no categories')
    categories = doc.metadata['categories']
    if 'laporan' not in categories:
        # Only 'laporan' posts are touched.
        return
    category = 'CMB' if doc.metadata.get('layout') == 'laporancmb' else 'CMS'
    if category not in categories:
        # Keep the final category last; slot the new one in before it.
        head, tail = categories[:-1], categories[-1]
        doc.metadata['categories'] = head + [category, tail]
    with open(data_file, 'wb') as sink:
        frontmatter.dump(doc, sink, handler=handler)
    with open(data_file, 'ab') as sink:
        sink.write(b'\n')
def rename(args: ap.Namespace) -> None:
    """The function corresponding to the `rename` command that changes the
    title of a task.

    `args.target` is either a three-letter task id or a path; `args.title`
    is the new title. The markdown H1 heading, the frontmatter index, and
    the file path all derive from the title, so all three are updated:
    the content is rewritten in a buffer, written to the new path, and the
    old file removed. Aborts with a message if the new path already exists.
    """
    frontmatter = _get_fm(args.paths, args.non_recursive)
    # Iterate through each target provided
    # First check if the target is an id, if so, use the gathered
    # frontmatter to find the corresponding path
    if re.match(r'^[a-z]{3}$', args.target):
        for item in frontmatter:
            if item['__id__'] == args.target:
                f = BytesIO()
                file_fm = fm.load(item['__path__'])
                # Replace the H1 heading that carries the old title.
                file_fm.content = file_fm.content.replace(
                    "# " + item['__title__'], "# " + args.title)
                fm.dump(file_fm, f)
                # The file path embeds the title, so rename the file too.
                new_path = item['__path__'].replace(item['__title__'],
                                                    args.title)
                if os.path.exists(new_path):
                    print(f'Path "{new_path}" already exists')
                    return
                with open(new_path, 'w') as file:
                    file.write(f.getvalue().decode('utf8') + '\n')
                os.remove(item['__path__'])
                break
        else:
            # for/else: no item matched the id.
            print(f'No task with id: "{args.target}" found')
    # If the target is not an id, but is a valid path, use the same logic
    # as in the past case to merge properties and write the file
    elif os.path.exists(args.target):
        f = BytesIO()
        file_fm = fm.load(args.target)
        info = _get_info(args.target)
        if info is None:
            print(f'Could not get info for task "{args.target}"')
            return
        file_fm.content = file_fm.content.replace("# " + info['__title__'],
                                                  "# " + args.title)
        fm.dump(file_fm, f)
        # NOTE(review): assumes the path starts with the old title —
        # removeprefix keeps the remainder (extension/suffix). TODO confirm.
        new_path = args.title + \
            args.target.removeprefix(info['__title__'])
        if os.path.exists(new_path):
            print(f'Path "{new_path}" already exists')
            return
        with open(new_path, 'w') as file:
            file.write(f.getvalue().decode('utf8') + '\n')
        os.remove(args.target)
    # Otherwise if none of the above worked, inform the user that the
    # target was not found
    else:
        print(f'Could not parse task "{args.target}"')
def add(post_path):
    """Add shorturl to a post."""
    # Derive the canonical post URL from the dated filename pattern.
    name = basename(post_path)
    match = env.re_file_foramt.match(name)
    date_obj = datetime.strptime(
        '{0}-{1}-{2}'.format(match.group('year'), match.group('month'),
                             match.group('day')), '%Y-%m-%d')
    post_url = env.tpl_post_url.format(date=date_obj.strftime('%Y/%j'),
                                       slug=match.group('slug'))
    full_path = join(env.root_dir, post_path)
    with codecs.open(full_path, 'r', 'utf-8') as fobj:
        post = frontmatter.loads(fobj.read())
    if post.get('shorturl'):
        abort('Post already has a short url: '
              '{shorturl}.'.format(**post))
    # Split content out of the metadata dict before re-dumping.
    meta = post.to_dict()
    content = meta.pop('content')
    meta['shorturl'] = shorturl(post_url)
    with codecs.open(full_path, 'w', 'utf-8') as fobj:
        frontmatter.dump(frontmatter.Post(content, **meta), fobj)
    puts('Added the short url: {shorturl}.'.format(**meta))
def dump(self, post: Post) -> None:
    """Write `post` to its filepath as Markdown with frontmatter metadata.

    The canonical URL and filepath fields are excluded from the metadata;
    the HTML content is converted to Markdown before writing.
    """
    meta = json.loads(post.json(exclude={"canonical_url", "filepath"}))
    body = markdownify(meta.pop("content"))
    frontmatter.dump(frontmatter.Post(body, **meta),
                     post.filepath,
                     encoding="utf-8")
def main(input, output, extra_context):
    """Read a frontmatter document from `input`, merge `extra_context` into
    its metadata, and dump the result to `output`."""
    post = frontmatter.loads(input.read())
    if extra_context:
        post.metadata.update(extra_context)
    frontmatter.dump(post, output)
def prettify(args):
    """Round-trip each file through the custom YAML handler to normalize its
    frontmatter, then append a trailing newline.

    :param args: namespace with a ``files`` iterable of paths.
    """
    for file in args.files:
        handler = CustomRuamelYAMLHandler()
        text = frontmatter.load(file, handler=handler)
        # Fix: the original left both handles to be closed by GC;
        # `with` guarantees the write is flushed before the append below.
        with open(file, "wb") as file_obj:
            frontmatter.dump(text, fd=file_obj, handler=handler)
        with open(file, 'a') as file_obj:
            file_obj.write('\n')
def save_frontmatter(self):
    # Persist self.front_matter to self.absfile (Python 2 code: note the
    # print statements). Returns True on success, False on any failure.
    try:
        frontmatter.dump(self.front_matter, self.absfile, allow_unicode=True)
    except Exception as ex:
        # Broad catch is deliberate best-effort: log and report failure
        # instead of propagating. `ex` itself is unused; the traceback
        # is printed instead.
        print "Error save frontmatter for file:{0}".format(self.absfile)
        traceback.print_exc()
        return False
    return True
def enforceOrder(list_to_be_ordered):
    """Stamp each post's frontmatter with a 1-based 'order' matching its
    position in `list_to_be_ordered`, writing every file back in place."""
    print(list_to_be_ordered)
    for index, post in enumerate(list_to_be_ordered):
        post_to_be_altered = fm.load(str(post))
        if folder_path == "python":
            # accounts for the fact that this is also run in the plotly.py-docs repo
            post_to_be_altered.metadata["jupyter"]["plotly"]['order'] = index + 1
        else:
            post_to_be_altered.metadata['order'] = index + 1
        # Both branches end in the same write-back, so do it once here.
        fm.dump(post_to_be_altered, str(post))
def create_systems_pages():
    """Copy the 'systems' template page into the target folder tree."""
    print(f"Creating systems page")
    with open(TEMPLATE_FOLDERS['systems']) as f:
        templatemd = frontmatter.load(f)
    target = copy.deepcopy(TARGET_FOLDERS['systems'])
    targetfile = TARGET_FOLDERS['path'] + target
    os.makedirs(os.path.dirname(targetfile), exist_ok=True)
    # Fix: the original dumped to the *path* while the same path was held
    # open (and truncated) by the `with` — opening the file twice. Dump to
    # the already-open handle instead.
    with open(targetfile, 'w') as f:
        frontmatter.dump(templatemd, f)
def writeToMd(filename, post):
    """Serialize `post` as frontmatter Markdown, strip all <strong> tags,
    and write the result to `filename` as UTF-8."""
    print("writing to " + filename)
    from io import BytesIO
    buffer = BytesIO()
    frontmatter.dump(post, buffer)
    doc = buffer.getvalue().decode('utf-8')
    # Remove opening and closing <strong> tags (case-insensitive).
    clean_data = re.sub("(</?strong[^>]*>)", "", doc, 0,
                        re.IGNORECASE | re.DOTALL | re.MULTILINE)
    # Fix: use a context manager instead of manual open/close so the file
    # is closed even if the write raises.
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(clean_data)
def write_post(post_type, date, slug, post):
    """Dump `post` into the dated temp file for its type/slug and make sure
    the matching upload directory exists."""
    file_path = TEMP_FILE_PATH.format(date=date.strftime('%Y-%m-%d'),
                                      type=post_type,
                                      slug=slug)
    upload_path = TEMP_UPLOAD_PATH.format(date=date.strftime('%Y/%j'),
                                          slug=slug)
    # Resolve the temp file relative to this module's directory.
    target = join(dirname(realpath(__file__)), file_path)
    with open(target, 'w') as handle:
        dump(post, handle)
    if not exists(upload_path):
        makedirs(upload_path)
def save(self, filename):
    """
    Save a page to a textfile.

    Parameters
    ----------
    filename : str
        The path to the file to be loaded.
    """
    # Fix: the parameter was named `fieldname` while the body (and the
    # docstring) referenced `filename`, so calling this method raised
    # NameError unless a global `filename` happened to exist.
    with open(filename, "w") as f:
        frontmatter.dump(self.content, f)
def done(args: ap.Namespace) -> None:
    """The function corresponding to the `done` command that marks the
    tasks with the given targets as done.

    Each target is either a three-letter task id (resolved via the gathered
    frontmatter) or a direct file path. In both cases a ``done: true`` flag
    is merged into the file's ``_tm`` mapping and the file rewritten.

    Raises ValueError when ``_tm`` exists but is neither a dict nor None.
    """
    frontmatter = _get_fm(args.paths, args.non_recursive)
    # Iterate through each target provided
    for target in args.targets:
        # First check if the target is an id, if so, use the gathered
        # frontmatter to find the corresponding path
        if re.match(r'^[a-z]{3}$', target):
            for item in frontmatter:
                if item['__id__'] == target:
                    # Once the target has been found, load the file with the
                    # frontmatter module, merge a `done: true` property with
                    # the frontmatter's `_tm` property, or inform the user
                    # if this was not possible, then write the file
                    f = BytesIO()
                    file_fm = fm.load(item['__path__'])
                    if '_tm' in file_fm and isinstance(file_fm['_tm'], dict):
                        file_fm['_tm']['done'] = True
                    elif '_tm' in file_fm and file_fm['_tm'] is not None:
                        raise ValueError(
                            f'{target} contains unsupported `_tm` key')
                    else:
                        file_fm['_tm'] = {'done': True}
                    fm.dump(file_fm, f)
                    with open(item['__path__'], 'w') as file:
                        file.write(f.getvalue().decode('utf8') + '\n')
                    break
            else:
                print(f'No task with id: "{target}" found')
        # If the target is not an id, but is a valid path, use the same logic
        # as in the past case to merge properties and write the file
        elif os.path.exists(target):
            f = BytesIO()
            file_fm = fm.load(target)
            # Fix: the original grouped `_tm is None` with the dict case and
            # then did `file_fm['_tm']['done'] = True`, which raises
            # TypeError on None. Mirror the id-branch logic instead:
            # None (or missing) gets a fresh {'done': True} mapping.
            if '_tm' in file_fm and isinstance(file_fm['_tm'], dict):
                file_fm['_tm']['done'] = True
            elif '_tm' in file_fm and file_fm['_tm'] is not None:
                raise ValueError(f'{target} contains unsupported `_tm` key')
            else:
                file_fm['_tm'] = {'done': True}
            fm.dump(file_fm, f)
            with open(target, 'w') as file:
                file.write(f.getvalue().decode('utf8') + '\n')
        # Otherwise if none of the above worked, inform the user that the
        # target was not found
        else:
            print(f'Could not parse task "{target}"')
def write_backlinks_to_note(note_filename: str, backlinks: Set[str]):
    """Store a backlink map ({slug: title}) in the note's frontmatter.

    Does nothing when `backlinks` is empty.
    """
    if not backlinks:
        return
    note_path = os.path.join(NOTES_DIR, note_filename)
    note = frontmatter.load(note_path)
    # Strip the '.md' suffix from each link to form the slug key.
    note['backlinks'] = {
        link[:-3]: _get_note_title(link)
        for link in backlinks
    }
    with open(note_path, 'wb') as out:
        frontmatter.dump(note, out)
def save(self):
    """Write this object as a frontmatter post to its computed path.

    If the path changed since the last save, the stale file is deleted
    first. Returns the path that was written.
    """
    self.clean()
    current_path = self.get_path()
    # Remove the previous file when the target path has moved.
    moved = self.path and current_path != self.path
    if moved and Path(self.path).exists():
        Path(self.path).unlink()
    document = frontmatter.Post(content=self.text, **self.metadata)
    with open(current_path, "wb") as handle:
        frontmatter.dump(document, handle)
        handle.write(b"\n")
    self.path = current_path
    return current_path
def push_to_file(self):
    """Serialize this document's fields into its backing file's frontmatter.

    Returns the metadata dict that was written.
    """
    self.path.touch()
    # serialise mongo fields and clean up
    metadata = {field: self[field] for field in self.fields}
    metadata['status'] = self.status.value
    metadata['keywords'] = list(self.keywords)
    document = self._parse_file()
    document.metadata = metadata
    document.content = self.content
    frontmatter.dump(document, self.path)
    return metadata
def test_dump_to_file(self):
    "dump post to filename"
    post = frontmatter.load('tests/hello-world.markdown')
    tempdir = tempfile.mkdtemp()
    # Fix: wrap in try/finally so the temp directory is removed even when
    # the assertion fails (the original leaked it on failure).
    try:
        filename = os.path.join(tempdir, 'hello.md')
        frontmatter.dump(post, filename)
        with open(filename) as f:
            self.assertEqual(f.read(), frontmatter.dumps(post))
    finally:
        # cleanup
        shutil.rmtree(tempdir)
def update_layout_in_dir(directory, layout):
    """
    Update "layout" key in frontmatter of all markdown files in a directory
    (recursively), rewriting each file in place.
    """
    for markdown_file in glob.iglob(directory + '/**/*.md', recursive=True):
        print("updating " + markdown_file)
        document = frontmatter.load(markdown_file)   # read file
        document['layout'] = layout                  # update frontmatter
        frontmatter.dump(document, markdown_file)    # write back
def create_element_pages(elementname):
    """Instantiate the element page template for `elementname` and write it
    into the target folder tree."""
    print(f"Creating {elementname} page")
    with open(TEMPLATE_FOLDERS['elements']) as f:
        templatemd = frontmatter.load(f)
    templatemd['data']['element'] = elementname
    templatemd['title'] = 'echemdb - {} CV data'.format(elementname)
    target = copy.deepcopy(TARGET_FOLDERS['elements']).replace(
        'tobesubstituted', elementname)
    targetfile = TARGET_FOLDERS['path'] + target
    os.makedirs(os.path.dirname(targetfile), exist_ok=True)
    # Fix: the original dumped to the *path* while the same path was held
    # open (and truncated) by the `with`, opening the file twice. Dump to
    # the already-open handle instead.
    with open(targetfile, 'w') as f:
        frontmatter.dump(templatemd, f)
def process_vault(config):
    """Convert an Obsidian vault into Jekyll/Hugo-style dated posts.

    Phase 1 scans every markdown file, validates its ``date`` metadata and
    builds a slug -> (dated name, dated filename, path) index. Phase 2
    rewrites each post (find/replace, highlight transform, link rewriting)
    and writes it as ``YYYY-MM-DD-slug.ext`` into the post output path.
    Assets are copied verbatim first so links can point at them.

    :param config: nested dict with ``vault`` and ``output`` sections —
        schema as read below; TODO confirm full schema against the loader.
    """
    md_files = find_files(config['vault']['path'],
                          ext='.md',
                          exclusions=config['vault'].get(
                              'excluded_subdirectories', []))
    asset_files = find_files(config['vault']['asset_path'])
    post_output_path = dir_exists_or_raise(
        config['output']['post_output_path'], 'post output path')
    asset_output_path = dir_exists_or_raise(
        config['output']['asset_output_path'], 'asset output path')
    relative_asset_path_prefix = config['output'].get(
        'relative_asset_path_prefix', '{{ site.assets_location }}')
    post_link_mode = config['output'].get('post_link_mode', 'jekyll')
    if not post_link_mode in ['jekyll', 'hugo']:
        raise ValueError(
            f'Unknown post link mode "{post_link_mode}". must be set to either "jekyll" or "hugo".'
        )
    copied_asset_files = write_asset_files(asset_files, asset_output_path)
    # First pass: index every post by slug so cross-post links can be
    # rewritten in the second pass.
    dated_files = {}
    post_map = {}
    for name, path in md_files.items():
        name, ext = os.path.splitext(name)
        slug_name = slugify_md_filename(name)
        post = frontmatter.load(path)
        postdate = validate_postdate(path, str(post.metadata.get('date', '')))
        dated_name = postdate + '-' + slug_name
        dated_name_ext = dated_name + ext
        dated_files[slug_name] = (dated_name, dated_name_ext, path)
        post_map[slug_name] = post
    rewriting_pipeline = RewritingPipeline(
        [ObsidianHighlightRewritingTransformer()])
    rewrite_engine = RewritingEngine(transformer=rewriting_pipeline)
    # Second pass: transform content and write each dated post.
    for slug_name, data in dated_files.items():
        _, dated_name_ext, path = data
        post = post_map[slug_name]
        # Allow find_replace to run on metadata in addition to post content
        post_text = find_replace(frontmatter.dumps(post), post.metadata)
        post = frontmatter.loads(post_text)
        post = rewrite_post_with_engine(rewrite_engine, post)
        rewritten = rewrite_links(post.content, dated_files,
                                  copied_asset_files,
                                  relative_asset_path_prefix, post_link_mode)
        post.content = rewritten
        with open(os.path.join(post_output_path, dated_name_ext),
                  'wb') as out:
            frontmatter.dump(post, out)
def generate_wps_options(docs: Path) -> None:
    """Generate configuration defaults for current version of WPS.

    Parses ``wemake_python_styleguide.options.defaults`` with libcst and
    emits one frontmatter markdown file per annotated assignment into
    ``docs``, describing the option's name, CLI flag, default value,
    docstring-comment description and reasoning.
    """
    from wemake_python_styleguide.options import defaults
    docs.mkdir(parents=True, exist_ok=True)
    # Parse the defaults module source so we can read the comments that
    # libcst preserves around each assignment.
    with open(defaults.__file__, 'r') as f:
        module = libcst.parse_module(f.read())
    for statement in module.body:
        assignment = statement.body[0]
        # Only annotated assignments (NAME: type = value) define options.
        if not isinstance(assignment, libcst.AnnAssign):
            continue
        name = assignment.target.value
        value = getattr(defaults, name)
        # The '#:' comment directly above the assignment is the description.
        last_leading_line = statement.leading_lines[-1]
        # NOTE(review): lstrip strips a *character set*, not a prefix — any
        # leading '#', ':' or ' ' chars are removed; assumed intended here.
        description = last_leading_line.comment.value.lstrip(
            '#: ',
        ).replace('``', '`')
        # The trailing comment on the same line holds the reasoning.
        reasoning = format_reasoning(statement.trailing_whitespace.comment)
        cli_name = '--' + name.lower().replace('_', '-')
        parameter = WPSConfigurationParameter(
            about=f'python://wemake_python_styleguide.options.defaults.{name}',
            name=name,
            cli_name=cli_name,
            value=str(value),
            description=description,
            reasoning=reasoning,
        )
        # Description becomes the body; everything else is frontmatter.
        document = frontmatter.Post(
            content=parameter.description,
            handler=frontmatter.YAMLHandler(),
            **parameter.dict(
                exclude={'description'},
                exclude_defaults=True,
            ),
        )
        output_path = docs / f'{parameter.name}.md'
        with output_path.open('wb+') as output_file:
            frontmatter.dump(
                document,
                output_file,
            )
def main():
    """Rewrite the `guid` field of every post whose permalink-derived guid
    is out of date, appending a trailing newline after the frontmatter."""
    for filename in sorted(glob("posts/*.md"), reverse=True):
        with open(filename, "r") as source:
            post = frontmatter.load(source)
        updated = post.metadata.copy()
        expected_guid = "http://friendlybit.com" + post.metadata["permalink"]
        if expected_guid == updated["guid"]:
            # Already correct — leave the file untouched.
            continue
        updated["guid"] = expected_guid
        with open(filename, "wb") as sink:
            frontmatter.dump(frontmatter.Post(post.content, **updated), sink)
            sink.write(b"\n")
def save(self):
    """Write this object as a frontmatter post to its computed path.

    When the path changed since the last save, the stale file is removed
    and every sibling file is moved alongside the new location. Returns
    the path that was written.
    """
    self.clean()
    current_path = self.get_path()
    if self.path and current_path != self.path:
        old = Path(self.path)
        if old.exists():
            old.unlink()
        # Carry the remaining sibling files over to the new directory.
        for sibling in self.path.parent.glob("*"):
            sibling.rename(current_path.parent / sibling.name)
    document = frontmatter.Post(content=self.text, **self.metadata)
    with open(current_path, "wb") as handle:
        frontmatter.dump(document, handle)
        handle.write(b"\n")
    self.path = current_path
    return current_path
def save_article(output, article):
    """Move `article`'s content out of its metadata and write it as a
    frontmatter markdown file named after its title under `output`.

    Raises KeyError when the metadata has no 'content' key.
    """
    try:
        article.content = article.metadata["content"]
        del article.metadata["content"]
    except KeyError as e:
        logger.error(f"Missing content in article metadata {e}")
        # Fix: `raise KeyError` replaced the caught exception with a new,
        # message-less one; a bare `raise` re-raises the original intact.
        raise
    file_name = f"{article.metadata['title'].replace(' ', '-')}.md"
    file_path = os.path.join(output, file_name)
    with open(file_path, "wb") as f:
        try:
            frontmatter.dump(article, f)
        except PermissionError:
            logger.warning(f"Failed to save file at {file_path}.")
def load_all_curriculums_with_unknown_ids(curriculums_base_dir):
    """Create a Curriculum DB row for every curriculum file that has no DB
    id yet, stamp the new id into the file's frontmatter, and run the
    per-curriculum setup."""
    for file_path in curriculum_file_paths(curriculums_base_dir):
        syllabus_frontmatter = frontmatter.load(file_path)
        if DB_ID in syllabus_frontmatter:
            # this one already has an id. Skip it
            continue
        defaults = get_creation_args_from_curricum_frontmatter(
            syllabus_frontmatter)
        curriculum = Curriculum.objects.create(
            id=Curriculum.get_next_available_id(), **defaults)
        # Record the freshly-assigned id back into the source file.
        syllabus_frontmatter[DB_ID] = curriculum.id
        with open(file_path, "wb") as out:
            frontmatter.dump(syllabus_frontmatter, out)
        set_up_single_curriculum_from_file(curriculum, file_path)
def write_file(content, location, **frontmatter_data):
    """
    Write `content` to `location` as a Jekyll-style frontmatter document,
    creating any missing parent directories first.

    :param content: `str` the main file content (excluding frontmatter)
    :param location: `str` path to write the file to
    :param frontmatter_data: keyword arguments written as frontmatter
        keys / values at the top of the file
    :return: None
    """
    dirname = os.path.dirname(location)
    # `is_dir` is a module-level helper defined elsewhere in this file.
    if not is_dir(dirname):
        os.makedirs(dirname)
    jekyll_post = frontmatter.Post(content, **frontmatter_data)
    # Binary mode: frontmatter.dump writes bytes to binary handles.
    with open(location, 'wb') as pfile:
        frontmatter.dump(jekyll_post, pfile)
#args = parser.parse_args() con = sqlite3.connect("_build/out/dv.db") con.row_factory = sqlite3.Row cur = con.cursor() cur.execute("select * from meta_article"); r = cur.fetchone() while r: md = pypandoc.convert(r['article'], 'markdown_strict', format='html') title_md = pypandoc.convert(r['title'], 'markdown_strict', format='html', extra_args=["--columns", "9999"]) md = fix(articlelink(md)) title_md = fix(title_md, title=True) post = frontmatter.loads("") post.content= md post['title'] = title_md post['date'] = r['date'] post['author'] = r['author'] post['id'] = r['id'] f = "_meta/" + r['date'] + "-" + yamltitle(title_md) + ".md" out = codecs.open(f, 'w', 'utf-8') frontmatter.dump(post, out, Dumper=frontmatter.yaml.Dumper, allow_unicode=True) r = cur.fetchone()
        # NOTE(review): this is a fragment — the enclosing `for` loop over
        # image paths `i` (and the `if` guarding this `continue`) start
        # before the visible source; indentation below is reconstructed.
        # Python 2 code (print statements).
        continue
    # Target PNG path derived from the non-extension part of the filename.
    png = stem.group(1) + ".png"
    fname = re.search("/([^/]+)$", i).group(1)
    if re.match("[cC]olor", i):
        # Color images are left untouched.
        pass
    elif os.path.isfile(i) and is_grey_scale(i):
        print "Skipping " + i
    elif os.path.isfile(i) and not os.path.isfile(png):
        # Upscale/denoise via waifu2x, then drop the original from git.
        print i
        subprocess.call([
            waifu2x, "-i",
            imagepath(i), "-o", png, "--noise_level", "2", "--scale_ratio",
            "1", "--model_dir", waifu_dir + "models"
        ])
        subprocess.call(["git", "rm", i])
    if os.path.isfile(png):
        # Point the frontmatter at the new .png filename.
        print fname
        changed = True
        newname = re.sub("\.(jpe?g|gif)$", ".png", fname)
        replace_name(post, fname, newname)
if changed:
    # Persist the updated frontmatter once, after the loop.
    frontmatter.dump(post,
                     args.fm_src,
                     Dumper=frontmatter.yaml.Dumper,
                     allow_unicode=True)
    print "Updated FM " + args.fm_src