def add(post_path):
    """Generate and attach a short url to the post at ``post_path``."""
    filename = basename(post_path)
    match = env.re_file_foramt.match(filename)
    published = datetime.strptime(
        '{0}-{1}-{2}'.format(match.group('year'), match.group('month'),
                             match.group('day')),
        '%Y-%m-%d')
    post_url = env.tpl_post_url.format(date=published.strftime('%Y/%j'),
                                       slug=match.group('slug'))
    full_path = join(env.root_dir, post_path)
    with codecs.open(full_path, 'r', 'utf-8') as fobj:
        post = frontmatter.loads(fobj.read())
    if post.get('shorturl'):
        abort('Post already has a short url: '
              '{shorturl}.'.format(**post))
    meta = post.to_dict()
    # The body must not be passed twice to Post(), so pull it out of the
    # metadata mapping before re-serializing.
    content = meta.pop('content')
    meta['shorturl'] = shorturl(post_url)
    with codecs.open(full_path, 'w', 'utf-8') as fobj:
        frontmatter.dump(frontmatter.Post(content, **meta), fobj)
    puts('Added the short url: {shorturl}.'.format(**meta))
def __get_specification_post(self, spec_dic):
    """Return an empty-bodied frontmatter post whose metadata copies ``spec_dic``."""
    spec_post = frontmatter.Post('')
    spec_post.metadata = {field: spec_dic[field] for field in spec_dic}
    return spec_post
def insert(self):
    """Persist this object as a markdown file and index it.

    Returns the new id on success, ``False`` when validation fails.
    """
    if not self.validate():
        return False
    # Claim the next id before building the metadata.
    extensions.set_max_id(extensions.get_max_id() + 1)
    self.id = extensions.get_max_id()
    self.date = datetime.now()
    data = {
        "type": self.type,
        "desc": self.desc,
        "title": str(self.title),
        "date": self.date.strftime("%x").replace("/", "-"),
        "tags": self.tags,
        "id": self.id,
        "path": self.path,
    }
    if self.type in ("bookmarks", "pocket_bookmarks"):
        data["url"] = self.url
    # convert to markdown file
    dataobj = frontmatter.Post(self.content)
    dataobj.metadata = data
    self.fullpath = create(
        frontmatter.dumps(dataobj),
        "{}-{}-{}".format(self.id, dataobj["date"], dataobj["title"]),
        path=self.path,
    )
    add_to_index(current_app.config['INDEX_NAME'], self)
    return self.id
def insert(self):
    """Write this object to disk as markdown and index it.

    Returns the assigned id on success, ``False`` when validation fails.
    """
    if not self.validate():
        return False
    self.id = app.config["max_id"]
    data = {
        "type": self.type,
        "desc": self.desc,
        "title": str(self.title),
        "date": self.date.strftime("%x").replace("/", "-"),
        "tags": self.tags,
        "id": self.id,
        "path": self.path,
    }
    if self.type in ("bookmarks", "pocket_bookmarks"):
        data["url"] = self.url
    # Bump the global counter only after this id has been taken.
    app.config["max_id"] += 1
    # convert to markdown
    dataobj = frontmatter.Post(self.content)
    dataobj.metadata = data
    create(
        frontmatter.dumps(dataobj),
        "{}-{}-{}".format(self.id, dataobj["date"], dataobj["title"]),
        path=self.path,
    )
    print(add_to_index("dataobj", self))
    return self.id
def create_post(entry):
    """Parse a raw journal ``entry`` into a frontmatter post.

    The leading ``date:`` prefix (if any) is parsed into a UTC datetime,
    ``@category`` and ``#tag`` tokens in the body become metadata lists,
    and (optionally) all URLs are unfurled into ``references``.
    """
    entry_re = re.compile(r'((?P<date>[^:]*):\s+)?\s*(?P<body>.*)',
                          re.MULTILINE | re.DOTALL)
    match = entry_re.search(entry)
    metadata = dict(CONFIG.get('frontmatter', {}))
    metadata.update(match.groupdict())
    if metadata['date']:
        metadata['date'], _ = cal.parseDT(datetimeString=metadata['date'],
                                          tzinfo=timezone('UTC'))
    else:
        metadata['date'] = datetime.now(tz=timezone('UTC'))
    categories = re.findall(r'@([^@\s]+)\b', metadata['body'])
    if categories:
        metadata['categories'] = categories
    tags = re.findall(r'#([^#\s]+)\b', metadata['body'])
    if tags:
        metadata['tags'] = tags
    # The body becomes the post content, not a metadata field.
    content = metadata.pop('body').strip()
    if CONFIG.get('references'):
        metadata['references'] = [pyunfurl.unfurl(url)
                                  for url in extractor.gen_urls(content)]
    return frontmatter.Post(content, **metadata)
def dump_recursive_comments(rpc, post_author, post_permlink, depth=0, format="markdown"):
    """Print every reply below a post as frontmatter documents, depth-first.

    :param rpc: node connection exposing ``get_content_replies``
    :param post_author: author of the root post
    :param post_permlink: permlink of the root post
    :param depth: current recursion depth (forwarded to recursive calls)
    :param format: ``"markdown"`` converts each body via ``markdownify``;
        any other value keeps the raw body
    """
    # NOTE(review): the original configured a TextWrapper here (width/indent
    # derived from depth + currentThreadDepth) but never used it; that dead
    # code — and the then-unneeded ``global currentThreadDepth`` — is removed.
    depth = int(depth)
    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {key: post[key] for key in ("author", "permlink")}
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        # Recurse only when the reply actually has children of its own.
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
def process_markdown(context):
    """Render the context's Jinja template and dump it as a frontmatter file.

    Returns the path of the markdown file (without writing it when the
    context is marked ``setup_only``).
    """
    # extend markdown context with custom values
    for register_fn in contexts_register:
        register_fn(context)
    if context.get('setup_only'):
        return context.markdown
    tmplfile = select_template(context.indicator, context.area,
                               templatesdir=context.templates_dir)
    template = jinja2.Template(open(tmplfile).read())
    os.makedirs(context.folder, exist_ok=True)
    rendered = template.render(**context.template_kwargs())
    md_file = context.markdown
    frontmatter.dump(frontmatter.Post(rendered, **context.metadata), md_file)
    return md_file
def insert(self):
    """Save this object as a markdown file, index it, and return its id.

    Returns ``False`` when validation fails.
    """
    if not self.validate():
        return False
    self.id = extensions.get_max_id()
    data = {
        "type": self.type,
        "desc": self.desc,
        "title": str(self.title),
        "date": self.date.strftime("%x").replace("/", "-"),
        "tags": self.tags,
        "id": self.id,
        "path": self.path,
    }
    if self.type in ("bookmarks", "pocket_bookmarks"):
        data["url"] = self.url
    # Reserve the next id now that this one is committed.
    extensions.set_max_id(self.id + 1)
    # convert to markdown
    dataobj = frontmatter.Post(self.content)
    dataobj.metadata = data
    create(
        frontmatter.dumps(dataobj),
        "{}-{}-{}".format(self.id, dataobj["date"], dataobj["title"]),
        path=self.path,
    )
    add_to_index(Config.INDEX_NAME, self)
    return self.id
def create_front_matter(self):
    """Serialize ``self.contents`` as a front-matter header in ``self.format``.

    Returns the serialized header string; returns ``''`` when the format is
    unset/'none', when there is no metadata, or when the format is not one of
    text/yaml/toml/json.
    """
    if not self.format or self.format == 'none':
        return ''
    if len(self.contents) == 0:
        # if there is no meta data do not create an empty header
        return ''
    if self.format == 'text':
        return self.generate_plain_text_front_matter()
    front_matter_post = frontmatter.Post('')
    # iterate metadata items rather than using "frontmatter.Post(content, **self._metadata)"
    # because POST init can not accept a meta data field that has a key of 'content'
    # which is common in html and likely in other files as well
    for key, value in self.contents.items():
        front_matter_post[key] = value
    if self.format == 'yaml':
        return frontmatter.dumps(front_matter_post, handler=frontmatter.YAMLHandler())
    if self.format == 'toml':
        return frontmatter.dumps(front_matter_post, handler=frontmatter.TOMLHandler())
    if self.format == 'json':
        return frontmatter.dumps(front_matter_post, handler=frontmatter.JSONHandler())
    # BUG FIX: previously an unrecognized format fell off the end and returned
    # None; always return a string so callers can concatenate safely.
    return ''
def dump_recursive_parents(rpc, post_author, post_permlink, limit=1, format="markdown"):
    """Print up to ``limit`` ancestors of a post (oldest first), then the post.

    :param rpc: node connection exposing ``get_content`` / ``get_content_replies``
    :param post_author: author of the starting post
    :param post_permlink: permlink of the starting post
    :param limit: how many parent levels to climb
    :param format: ``"markdown"`` converts the body via ``markdownify``;
        any other value keeps the raw body

    Also raises the module-global ``currentThreadDepth`` high-water mark.
    """
    global currentThreadDepth
    limit = int(limit)
    # NOTE(review): the original configured a TextWrapper here but never used
    # it; that dead code has been removed.
    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1
    post = rpc.get_content(post_author, post_permlink)
    # Climb to the parent first so ancestors print before this post.
    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)
    meta = {key: post[key] for key in ("author", "permlink")}
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
def write(self, path: pathlib.Path) -> None:
    """Write this note's content plus annotation metadata into ``path``.

    :param path: existing directory to write the file into
    :raises NotADirectoryError: if ``path`` is not a directory
    """
    if not path.is_dir():
        raise NotADirectoryError(f'{str(path)} is not a directory')
    if not self._sync_notes:
        print('sync locked for', self._title)
        return
    print('updating', self._title)
    # Stamp the file with the newest annotation's modification time.
    latest = max(anno.modified_date for anno in self._annotations)
    fmpost = frontmatter.Post(
        self.content,
        asset_id=self._asset_id,
        title=self.title,
        author=self.author,
        modified_date=latest.isoformat(),
    )
    with open(path / self._filename, 'w') as f:
        f.write(frontmatter.dumps(fmpost))
def insert(self):
    """Creates a new file with the object's attributes.

    Runs the before/after creation hooks, writes the markdown file, indexes
    it, and returns the new id (``False`` when validation fails).
    """
    if not self.validate():
        return False
    helpers.set_max_id(helpers.get_max_id() + 1)
    self.id = helpers.get_max_id()
    self.date = datetime.now()
    hooks = helpers.load_hooks()
    hooks.before_dataobj_create(self)
    data = {
        "type": self.type,
        "title": str(self.title),
        "date": self.date.strftime("%x").replace("/", "-"),
        "tags": self.tags,
        "id": self.id,
        "path": self.path,
    }
    if self.type in ("bookmark", "pocket_bookmark"):
        data["url"] = self.url
    # convert to markdown file
    dataobj = frontmatter.Post(self.content)
    dataobj.metadata = data
    self.fullpath = create(
        frontmatter.dumps(dataobj),
        f"{self.id}-{dataobj['title']}",
        path=self.path,
    )
    hooks.on_dataobj_create(self)
    self.index()
    return self.id
def dump(self, post: Post) -> None:
    """Serialize ``post`` to its ``filepath`` as markdown with front matter."""
    metadata = json.loads(post.json(exclude={"canonical_url", "filepath"}))
    # The HTML body becomes the markdown content; everything else is metadata.
    body = markdownify(metadata.pop("content"))
    frontmatter.dump(frontmatter.Post(body, **metadata), post.filepath, encoding="utf-8")
def save(self):
    """Write the note to its computed path, removing a stale previous file.

    Returns the path the note was written to.
    """
    self.clean()
    target = self.get_path()
    # Drop the old file if the note has moved since the last save.
    moved = self.path and target != self.path and Path(self.path).exists()
    if moved:
        Path(self.path).unlink()
    with open(target, "wb") as out_file:
        frontmatter.dump(frontmatter.Post(content=self.text, **self.metadata),
                         out_file)
        out_file.write(b"\n")
    self.path = target
    return target
def get_question(question_id):
    """Fetch a LeetCode question and build a frontmatter post stub for it.

    Returns ``(title_slug, post)``.
    """
    title_slug = get_title_slug(question_id)
    question = get_question_info(title_slug)
    data = {
        'title': str(question_id) + '. ' + question['title'],
        'date': datetime.datetime.now().isoformat(),
        'tags': [tag['name'] for tag in question['topicTags']] + ['LeetCode'],
        'categories': ['LeetCode'],
    }
    url = 'https://leetcode-cn.com/problems/'
    content = '今天的题目是[%s](%s)。\n\n' % (data['title'], url + title_slug)
    return title_slug, frontmatter.Post(content, **data)
def generate_wps_options(docs: Path) -> None:
    """Generate configuration defaults for current version of WPS.

    Parses ``wemake_python_styleguide.options.defaults`` with libcst, and for
    every annotated assignment writes ``docs/<name>.md`` with the option's
    default value and description as front matter.
    """
    from wemake_python_styleguide.options import defaults
    docs.mkdir(parents=True, exist_ok=True)
    with open(defaults.__file__, 'r') as f:
        module = libcst.parse_module(f.read())
    for statement in module.body:
        assignment = statement.body[0]
        # Only annotated assignments are documented option constants.
        if not isinstance(assignment, libcst.AnnAssign):
            continue
        name = assignment.target.value
        # The runtime value of the default, looked up on the imported module.
        value = getattr(defaults, name)
        # The description is the `#: ...` comment line directly above.
        last_leading_line = statement.leading_lines[-1]
        # NOTE(review): lstrip strips a character *set* ('#', ':', ' '), so a
        # description that itself begins with one of those characters would be
        # truncated — confirm the source comments never do.
        description = last_leading_line.comment.value.lstrip(
            '#: ',
        ).replace('``', '`')
        # Reasoning comes from the trailing comment on the assignment line.
        reasoning = format_reasoning(statement.trailing_whitespace.comment)
        cli_name = '--' + name.lower().replace('_', '-')
        parameter = WPSConfigurationParameter(
            about=f'python://wemake_python_styleguide.options.defaults.{name}',
            name=name,
            cli_name=cli_name,
            value=str(value),
            description=description,
            reasoning=reasoning,
        )
        # The description is the document body; all other fields become
        # YAML front matter.
        document = frontmatter.Post(
            content=parameter.description,
            handler=frontmatter.YAMLHandler(),
            **parameter.dict(
                exclude={'description'},
                exclude_defaults=True,
            ),
        )
        output_path = docs / f'{parameter.name}.md'
        with output_path.open('wb+') as output_file:
            frontmatter.dump(
                document,
                output_file,
            )
def document_violation(
    version_directory: Path,
    violation: BugbearViolation,
) -> None:
    """Write violation description into a Markdown file.

    The message becomes the document body; the remaining violation fields
    become YAML front matter.
    """
    metadata = violation.dict(
        exclude={'message', 'type', 'vars'},
        exclude_none=True,
    )
    document = frontmatter.Post(
        content=violation.message,
        handler=frontmatter.YAMLHandler(),
        **metadata,
    )
    target = version_directory / f'{violation.code}.md'
    target.write_text(frontmatter.dumps(document))
def main():
    """Rewrite each post's ``guid`` to the canonical friendlybit.com URL."""
    for filename in sorted(glob("posts/*.md"), reverse=True):
        with open(filename, "r") as f:
            post = frontmatter.load(f)
        new_metadata = post.metadata.copy()
        new_guid = "http://friendlybit.com" + post.metadata["permalink"]
        # Skip files that already carry the canonical guid.
        if new_guid == new_metadata["guid"]:
            continue
        new_metadata["guid"] = new_guid
        with open(filename, "wb") as f:
            frontmatter.dump(frontmatter.Post(post.content, **new_metadata), f)
            f.write(b"\n")
def save(self):
    """Write the note to its current path, migrating siblings from an old path.

    Returns the path the note was written to.
    """
    self.clean()
    target = self.get_path()
    if self.path and target != self.path:
        old = Path(self.path)
        if old.exists():
            old.unlink()
        # Move any attachments that lived next to the old file.
        for sibling in self.path.parent.glob("*"):
            sibling.rename(target.parent / sibling.name)
    with open(target, "wb") as out_file:
        frontmatter.dump(frontmatter.Post(content=self.text, **self.metadata),
                         out_file)
        out_file.write(b"\n")
    self.path = target
    return target
def main():
    """Strip every non-whitelisted metadata field from each post file."""
    for filename in sorted(glob("posts/*.md"), reverse=True):
        with open(filename, "r") as f:
            comment_post = frontmatter.load(f)
        kept = {
            field: value
            for field, value in comment_post.metadata.items()
            if field in WHITELISTED_METADATA
        }
        with open(filename, "wb") as f:
            frontmatter.dump(frontmatter.Post(comment_post.content, **kept), f)
def write_file(content, location, **frontmatter_data):
    """
    :param content: `str` the main file content (excluding frontmatter)
    :param location: `str` path to write the file to
    :param frontmatter_data: `splat` of frontmatter keys / values to write to the file
    :return: None
    """
    parent = os.path.dirname(location)
    # Create intermediate directories on demand.
    if not is_dir(parent):
        os.makedirs(parent)
    post = frontmatter.Post(content, **frontmatter_data)
    with open(location, 'wb') as pfile:
        frontmatter.dump(post, pfile)
def save(self, data):
    """Write ``data['content']`` plus metadata to the markdown file for ``data['path']``.

    Returns a ``{'status': ..., 'data': ...}`` result dict; never raises.
    """
    try:
        post = frontmatter.Post(data.get('content'))
        post.metadata = data.get('metadata')
        for key, value in post.metadata.items():
            # An empty 'sessions' must stay a list; other empty fields become ''.
            fallback = [] if key == 'sessions' else ''
            post.metadata[key] = value if value else fallback
        file_path = self.md_file_path(data['path'])
        Files.write(file_path, frontmatter.dumps(post))
        if Files.exists(file_path):
            return {'status': 'ok', 'data': data}
        return {'status': 'error',
                'data': 'file not saved ok: {0}'.format(file_path)}
    except Exception as error:
        # Best-effort API: surface the failure in the result instead of raising.
        return {'status': 'error', 'data': error}
def post_to_markdown(json_filepath, shortcode, post_types):
    """Convert a downloaded Instagram post into markdown file(s).

    Returns the frontmatter post on success, ``None`` when the caption or
    images are missing.
    """
    stem = json_filepath[:-8]
    post_files = [f for f in os.listdir(INSTA_DIR) if f.startswith(stem)]
    txt_file = None
    img_files = []
    for post_file in post_files:
        if post_file.endswith(".txt"):
            txt_file = post_file
        if post_file.endswith(".jpg"):
            img_files += [post_file]
    # Need both a caption file and at least one image to build a post.
    if txt_file is None or not img_files:
        return None
    with open(join(INSTA_DIR, txt_file)) as f:
        txt_content = f.read()
    # Filenames start with an ISO date: YYYY-MM-DD...
    date = json_filepath[:10]
    year = date[0:4]
    month = date[5:7]
    slug = f"ig-{shortcode}"
    aws_img_urls = s3_upload(img_files, year, month)
    aws_img_html = img_urls_to_html(aws_img_urls)
    md_content = md(
        f'<p>{txt_content}</p>{aws_img_html}<br><a href="https://www.instagram.com/p/{shortcode}/">View on Instagram</a>'
    )
    post_md = fm.Post(md_content)
    post_md['instagram_shortcode'] = shortcode
    post_md['date'] = date
    post_md['slug'] = slug
    post_md['title'] = txt_content[:140].replace('\n', '')
    post_md['aws_media_src_url'] = aws_img_urls[0]
    for post_type in post_types:
        md_filepath = join(STATIC_DIR, post_type, year, month, f"{slug}.md")
        os.makedirs(os.path.dirname(md_filepath), exist_ok=True)
        with open(md_filepath, 'wb') as f:
            fm.dump(post_md, f)
    return post_md
def create_markdown_file(filename: str):
    """Convert a highlights CSV into a markdown page of quoted passages."""
    content = ''
    title = ''
    with open(os.path.join(CSV_DIR, filename)) as f:
        for index, row in enumerate(csv.reader(f)):
            # The second row of the export carries the book title.
            if index == 1:
                title = row[0]
            if 'Highlight' in row[0]:
                content += '"{}"'.format(row[3]) + '\n\n' + '---------' + '\n\n'
    post = frontmatter.Post(content)
    post['layout'] = 'page'
    post['title'] = title.capitalize()
    with open(os.path.join(HIGHLIGHTS_DIR, filename + '.md'), 'w') as f:
        print(frontmatter.dumps(post), file=f)
def write_violations_to_disk(
    violations: Iterator[Violation],
    directory: Path,
) -> None:
    """Dump each violation as a frontmatter markdown file named by its code."""
    for violation in violations:
        metadata = violation.dict(
            exclude={'description', 'output_file'},
            by_alias=True,
            exclude_none=True,
        )
        document = frontmatter.Post(
            content=violation.description,
            handler=frontmatter.YAMLHandler(),
            **metadata,
        )
        violation_path = directory / f'{violation.code}.md'
        with open(violation_path, 'wb+') as code_file:
            frontmatter.dump(document, code_file)
def main():
    """Mirror every feed entry into ``posts/<id>.md`` with frontmatter."""
    feed_url = 'http://blog.marksteve.com/feed'
    page = 1
    while True:
        parsed = feedparser.parse('{}?page={}'.format(feed_url, page))
        # An empty page means the feed is exhausted.
        if not parsed.entries:
            break
        for entry in parsed.entries:
            entry_id = entry.link.rsplit('/', 1)[1]
            body = html2text(entry.content[0]['value'])
            y, m, d = entry.published_parsed[:3]
            post = frontmatter.Post(
                body,
                title=entry.title,
                publish_date='{}-{:02d}-{:02d}'.format(y, m, d),
            )
            with open('posts/{}.md'.format(entry_id), 'wb') as f:
                f.write(frontmatter.dumps(post).encode('utf8'))
        page += 1
def _create_review(self, trailer):
    """Create the dated review page in ``_posts``, optionally embedding a trailer."""
    review_date = date.today().strftime("%Y-%m-%d")
    name = self.template["movie_name"]
    self.template["date"] = date.today()
    if trailer:
        nl = "\n"
        trailer_part = f'{{% youtube "{trailer}" %}}'
        body = f"nothing {nl}{trailer_part}"
    else:
        body = "nothing"
    post = frontmatter.Post(body, **self.template)
    target = os.path.join(self.project, "_posts", review_date + "-" + name + ".md")
    with open(target, "w") as f:
        f.write(frontmatter.dumps(post))
    click.echo(
        click.style(f"The page for review {name} has been created", fg="green"))
def add_metadata_md_to_content(self, content):
    """Prepend front matter built from ``self._metadata`` to ``content``.

    Returns ``content`` unchanged when the configured format is 'none' or
    there is no metadata; otherwise strips any pandoc-generated front matter
    and re-serializes the metadata in the configured format
    (text / yaml / toml / json).
    """
    self.logger.debug(f"Add front matter meta-data to markdown page")
    if self._conversion_settings.front_matter_format == 'none':
        return content
    if len(
            self._metadata
    ) == 0:  # if there is no meta data do not create an empty header
        return content
    if frontmatter.checks(content):
        _, content = frontmatter.parse(
            content
        )  # remove metadata if pandoc has added it (pandoc v2.13 and above)
    if self._conversion_settings.front_matter_format == 'text':
        content = self.add_text_metadata_to_content(content)
        return content
    merged_content = frontmatter.Post(content)
    # iterate metadata items rather than using "frontmatter.Post(content, **self._metadata)"
    # because POST init can not accept a meta data field that has a key of 'content' which is common in html
    # and likely in other files as well
    # Normalize timestamps before copying metadata into the post.
    self.format_ctime_and_mtime_if_required()
    self.change_displayed_created_time_if_required()
    self.change_displayed_modified_time_if_required()
    for key, value in self._metadata.items():
        merged_content[key] = value
    self._force_pandoc_markdown_to_yaml_front_matter()
    if self._conversion_settings.front_matter_format == 'yaml':
        content = frontmatter.dumps(merged_content,
                                    handler=frontmatter.YAMLHandler())
    if self._conversion_settings.front_matter_format == 'toml':
        content = frontmatter.dumps(merged_content,
                                    handler=frontmatter.TOMLHandler())
    if self._conversion_settings.front_matter_format == 'json':
        content = frontmatter.dumps(merged_content,
                                    handler=frontmatter.JSONHandler())
    return content
def create_version_index_md(docs: Path, package_version: str) -> None:
    """Write version description."""
    parsed = version.parse(package_version)
    post = frontmatter.Post(
        content='',
        handler=frontmatter.YAMLHandler(),
        major_version=parsed.major,
        minor_version=parsed.minor,
        patch_version=parsed.micro,
    )
    text_content = f'{frontmatter.dumps(post)}\n\n'
    try:
        (docs / 'index.md').write_text(text_content)
    except FileNotFoundError:
        # Parent directory missing: create it and retry once.
        docs.mkdir(parents=True, exist_ok=True)
        (docs / 'index.md').write_text(text_content)
def main(folder, title, output_path): p = Path(folder) # Read Markdown content files = [p / 'README.md'] + sorted(list(p.glob('[0-9]*.md'))) if Path('./images').exists(): raise ValueError("images folder already exists. Should not exist for symlinks") if len(files) < 2: raise ValueError(f"Did not find Markdown files in {str(p)}") lines = [] for file in files: with file.open() as f: lines.extend(file_content(f)) # Create a single file with all content metadata = build_metadata(title) content = frontmatter.Post(''.join(clean_lines(lines)), **metadata) with open('tmp.md', 'w') as f: f.writelines(frontmatter.dumps(content)) # Build PDF with Pandoc os.makedirs(Path(output_path).parent, exist_ok=True) os.symlink(p / 'images/', Path('./images')) # See https://pandoc.org/MANUAL.html # especially "Markdown variants" os.system(f''' pandoc --toc -s tmp.md -o {output_path} \ --template "eisvogel.latex" \ --from markdown+lists_without_preceding_blankline \ --filter pandoc-latex-environment \ --pdf-engine=xelatex ''') # Clean up os.unlink(Path('./images')) os.remove('tmp.md') click.echo(f"PDF file has been generated at {output_path}")