def test_write_metadata_with_formats():
    data = {
        'slug': 'hello-world',
        'title': 'Hello, world!',
        'b': '2',
        'a': '1'
    }
    # Nikola: defaults first, then sorted alphabetically
    # YAML: all sorted alphabetically
    # TOML: insertion order (py3.6), random (py3.5 or older)
    assert write_metadata(data, 'nikola') == """\
.. title: Hello, world!
.. slug: hello-world
.. a: 1
.. b: 2

"""
    assert write_metadata(data, 'yaml') == """\
---
a: '1'
b: '2'
slug: hello-world
title: Hello, world!
---
"""
    toml = write_metadata(data, 'toml')
    assert toml.startswith('+++\n')
    assert toml.endswith('+++\n')
    assert 'slug = "hello-world"' in toml
    assert 'title = "Hello, world!"' in toml
    assert 'b = "2"' in toml
    assert 'a = "1"' in toml
def test_write_metadata_from_site_and_fallbacks():
    site = dummy()
    site.config = {'METADATA_FORMAT': 'yaml'}
    data = {'title': 'xx'}
    assert write_metadata(data, site=site) == '---\ntitle: xx\n---\n'
    assert write_metadata(data) == '.. title: xx\n\n'
    assert write_metadata(data, 'foo') == '.. title: xx\n\n'
    assert write_metadata(data, 'filename_regex') == '.. title: xx\n\n'
def test_write_metadata_pelican_detection():
    rest_fake, md_fake, html_fake = dummy(), dummy(), dummy()
    rest_fake.name = 'rest'
    md_fake.name = 'markdown'
    html_fake.name = 'html'
    data = {'title': 'xx'}
    assert write_metadata(data, 'pelican', compiler=rest_fake) == '==\nxx\n==\n\n'
    assert write_metadata(data, 'pelican', compiler=md_fake) == 'title: xx\n\n'
    assert write_metadata(data, 'pelican', compiler=html_fake) == '.. title: xx\n\n'
    assert write_metadata(data, 'pelican', compiler=None) == '.. title: xx\n\n'
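# The rest/markdown/html stand-ins above only need a ``name`` attribute, since
# write_metadata() inspects nothing else of the compiler for Pelican detection.
# A minimal sketch of such a helper (an assumption; the real dummy() used by
# these tests may be implemented differently):
from types import SimpleNamespace

def make_fake_compiler(name):
    """Return an object that quacks like a page compiler for these tests."""
    return SimpleNamespace(name=name)

rest_fake = make_fake_compiler('rest')   # rest_fake.name == 'rest'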
def test_write_metadata_comment_wrap():
    data = {'title': 'Hello, world!', 'slug': 'hello-world'}
    assert write_metadata(data, 'nikola') == """\
.. title: Hello, world!
.. slug: hello-world

"""
    assert write_metadata(data, 'nikola', True) == """\
<!--
.. title: Hello, world!
.. slug: hello-world
-->

"""
    assert write_metadata(data, 'nikola', ('111', '222')) == """\
111
.. title: Hello, world!
.. slug: hello-world
222

"""
def create_post(self, path, **kw):
    """Create a new post."""
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    if not metadata['description']:
        # For PHP, a description must be set. Otherwise, Nikola will
        # take the first 200 characters of the post as the Open Graph
        # description (og:description meta element)!
        # If the PHP source leaks there:
        # (a) The script will be executed multiple times
        # (b) PHP may encounter a syntax error if it cuts too early,
        #     therefore completely breaking the page
        # Here, we just use the title. The user should come up with
        # something better, but just using the title does the job.
        metadata['description'] = metadata['title']
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))
        fd.write(content)
def create_post(self, path, **kw):
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    kw.pop('is_page', False)
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    with codecs.open(path, "wb+", "utf8") as fd:
        if onefile:
            fd.write("#+BEGIN_COMMENT\n")
            if write_metadata:
                fd.write(write_metadata(metadata))
            else:
                for k, v in metadata.items():
                    fd.write('.. {0}: {1}\n'.format(k, v))
            fd.write("#+END_COMMENT\n")
            fd.write("\n\n")
        if content:
            fd.write(content)
        else:
            fd.write('Write your post here.')
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
    """Create post file with optional metadata."""
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    with io.open(path, "w+", encoding="utf-8") as fd:
        if onefile:
            fd.write("#+BEGIN_COMMENT\n")
            if write_metadata:
                fd.write(write_metadata(metadata))
            else:
                for k, v in metadata.items():
                    fd.write('.. {0}: {1}\n'.format(k, v))
            fd.write("#+END_COMMENT\n")
            fd.write("\n\n")
        if content:
            fd.write(content)
        else:
            fd.write('Write your post here.')
def create_post(self, path, **kw):
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    kw.pop('is_page', False)
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    with io.open(path, "w+", encoding="utf-8") as fd:
        if onefile:
            fd.write("#+BEGIN_COMMENT\n")
            if write_metadata:
                fd.write(write_metadata(metadata))
            else:
                for k, v in metadata.items():
                    fd.write('.. {0}: {1}\n'.format(k, v))
            fd.write("#+END_COMMENT\n")
            fd.write("\n\n")
        if content:
            fd.write(content)
        else:
            fd.write('Write your post here.')
def test_write_metadata_compiler():
    data = {'title': 'Hello, world!', 'slug': 'hello-world'}
    assert write_metadata(data, 'rest_docinfo') == """\
=============
Hello, world!
=============

:slug: hello-world
"""
    assert write_metadata(data, 'markdown_meta') in ("""\
title: Hello, world!
slug: hello-world

""", """\
slug: hello-world
title: Hello, world!

""")
def write_metadata(filename, title, slug, post_date, description, tags, **kwargs):
    if not description:
        description = ""
    utils.makedirs(os.path.dirname(filename))
    with io.open(filename, "w+", encoding="utf8") as fd:
        data = {'title': title, 'slug': slug, 'date': post_date,
                'tags': ','.join(tags), 'description': description}
        data.update(kwargs)
        fd.write(utils.write_metadata(data))
def test_write_metadata_compiler(metadata_format, expected_results):
    """
    Test writing metadata with different formats.

    We test for multiple results because some compilers might produce
    unordered output.
    """
    data = {"title": "Hello, world!", "slug": "hello-world"}
    assert write_metadata(data, metadata_format) in expected_results
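# A sketch of the parametrization the test above expects (an assumption; the
# real pytest.mark.parametrize table may differ). The expected strings mirror
# the non-parametrized test_write_metadata_compiler shown earlier;
# markdown_meta gets two candidates because its key order is not guaranteed.
import pytest
from nikola.utils import write_metadata

@pytest.mark.parametrize("metadata_format, expected_results", [
    ("rest_docinfo", ["=============\nHello, world!\n=============\n\n:slug: hello-world\n"]),
    ("markdown_meta", ["title: Hello, world!\nslug: hello-world\n\n",
                       "slug: hello-world\ntitle: Hello, world!\n\n"]),
])
def test_write_metadata_compiler_parametrized(metadata_format, expected_results):
    data = {"title": "Hello, world!", "slug": "hello-world"}
    assert write_metadata(data, metadata_format) in expected_results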
def write_metadata(filename, title, slug, post_date, description, tags, **kwargs):
    """Write metadata to meta file."""
    if not description:
        description = ""
    utils.makedirs(os.path.dirname(filename))
    with io.open(filename, "w+", encoding="utf8") as fd:
        data = {"title": title, "slug": slug, "date": post_date,
                "tags": ",".join(tags), "description": description}
        data.update(kwargs)
        fd.write(utils.write_metadata(data))
def write_metadata(self, filename, title, slug, post_date, description, tags, **kwargs):
    """Write metadata to meta file."""
    if not description:
        description = ""
    utils.makedirs(os.path.dirname(filename))
    with io.open(filename, "w+", encoding="utf8") as fd:
        data = {'title': title, 'slug': slug, 'date': post_date,
                'tags': ','.join(tags), 'description': description}
        data.update(kwargs)
        fd.write(utils.write_metadata(data, site=self.site, comment_wrap=False))
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
    """Create post file with optional metadata."""
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with codecs.open(path, "wb+", "utf8") as fd:
        if onefile:
            fd.write("////\n")
            fd.write(write_metadata(metadata))
            fd.write("////\n")
        fd.write(content)
def create_post(self, path, **kw):
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata))
        fd.write('\n' + content)
def test_write_metadata_with_formats(metadata_format, expected_result):
    """
    Test writing metadata with different formats.

    YAML is expected to be sorted alphabetically.
    Nikola sorts by putting the defaults first and then sorting the rest
    alphabetically.
    """
    data = {
        "slug": "hello-world",
        "title": "Hello, world!",
        "b": "2",
        "a": "1"
    }
    assert write_metadata(data, metadata_format) == expected_result
def create_post(self, path, **kw):
    content = kw.pop('content', 'Write your post here.')
    one_file = kw.pop('onefile', False)  # NOQA
    is_page = kw.pop('is_page', False)  # NOQA
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with codecs.open(path, "wb+", "utf8") as fd:
        if one_file:
            fd.write(write_metadata(metadata))
            fd.write('\n')
        fd.write(content)
def create_post(self, path, **kw):
    """Create a new post."""
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf-8") as fd:
        if onefile:
            fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self))
        fd.write(content)
def create_post(self, path, **kw):
    """Create a new post."""
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata, comment_wrap=('///', '///'), site=self.site, compiler=self))
        fd.write(content)
def create_post(self, path, **kw):
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    kw.pop('is_page', False)
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with codecs.open(path, "wb+", "utf8") as fd:
        if onefile:
            fd.write('<notextile> <!--\n')
            fd.write(write_metadata(metadata))
            fd.write('--></notextile>\n\n')
        fd.write(content)
def create_post(self, path, **kw):
    content = kw.pop("content", "Write your post here.")
    onefile = kw.pop("onefile", False)
    kw.pop("is_page", False)
    metadata = OrderedDict()
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith("\n"):
        content += "\n"
    with codecs.open(path, "wb+", "utf8") as fd:
        if onefile:
            fd.write("<!-- \n")
            fd.write(write_metadata(metadata))
            fd.write("-->\n\n")
        fd.write(content)
def create_post(self, path, **kw):
    """Create a new post."""
    content = kw.pop("content", None)
    onefile = kw.pop("onefile", False)
    # is_page is not used by create_post as of now.
    kw.pop("is_page", False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith("\n"):
        content += "\n"
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata))
            fd.write("\n")
        fd.write(content)
def create_post(self, path, **kw):
    content = kw.pop('content', 'Write your post here.')
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with codecs.open(path, "wb+", "utf8") as fd:
        if onefile:
            fd.write("\n'''\n<!--\n")
            fd.write(write_metadata(metadata))
            fd.write("-->\n'''\n")
        fd.write(content)
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
    """Create post file with optional metadata."""
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata))
            fd.write('\n\n')
        fd.write(content)
def create_post(self, path, **kw):
    """Create a new post."""
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            _format = self.site.config.get('METADATA_FORMAT', 'nikola').lower()
            if _format == 'pelican':
                _format = 'pelican_rest'
            fd.write(write_metadata(metadata, _format))
            fd.write('\n')
        fd.write(content)
def test_write_metadata_with_format_toml():
    """
    Test writing metadata in TOML format.

    TOML is sorted randomly in Python 3.5 or older and by insertion order
    since Python 3.6.
    """
    data = {
        "slug": "hello-world",
        "title": "Hello, world!",
        "b": "2",
        "a": "1"
    }
    toml = write_metadata(data, "toml")
    assert toml.startswith("+++\n")
    assert toml.endswith("+++\n")
    assert 'slug = "hello-world"' in toml
    assert 'title = "Hello, world!"' in toml
    assert 'b = "2"' in toml
    assert 'a = "1"' in toml
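# Because the key order of the TOML block is not guaranteed, an alternative to
# the substring checks above is to parse the payload back. A sketch, assuming
# the third-party ``toml`` package is available (it is not a stated dependency
# of these tests):
import toml

def parsed_toml_front_matter(text):
    """Strip the '+++' fences and return the metadata as a dict."""
    body = text.strip().strip('+').strip()
    return toml.loads(body)

# parsed_toml_front_matter(write_metadata(data, "toml")) would then equal
# {"slug": "hello-world", "title": "Hello, world!", "b": "2", "a": "1"}.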
def _execute(self, options, args):
    """Create a new post or page."""
    global LOGGER
    compiler_names = [
        p.name for p in self.site.plugin_manager.getPluginsOfCategory("PageCompiler")
    ]

    if len(args) > 1:
        print(self.help())
        return False
    elif args:
        path = args[0]
    else:
        path = None

    # Even though stuff was split into `new_page`, it’s easier to do it
    # here not to duplicate the code.
    is_page = options.get('is_page', False)
    is_post = not is_page
    content_type = 'page' if is_page else 'post'

    title = options['title'] or None
    author = options['author'] or ''
    tags = options['tags']
    onefile = options['onefile']
    twofile = options['twofile']
    import_file = options['import']
    wants_available = options['available-formats']
    date_path_opt = options['date-path']
    date_path_auto = self.site.config['NEW_POST_DATE_PATH'] and content_type == 'post'
    date_path_format = self.site.config['NEW_POST_DATE_PATH_FORMAT'].strip('/')

    if wants_available:
        self.print_compilers()
        return

    if is_page:
        LOGGER = PAGELOGGER
    else:
        LOGGER = POSTLOGGER

    if twofile:
        onefile = False
    if not onefile and not twofile:
        onefile = self.site.config.get('ONE_FILE_POSTS', True)

    content_format = options['content_format']
    content_subformat = None

    if "@" in content_format:
        content_format, content_subformat = content_format.split("@")

    if not content_format and path and not os.path.isdir(path):
        # content_format not specified. If path was given, use
        # it to guess (Issue #2798)
        extension = os.path.splitext(path)[-1]
        for compiler, extensions in self.site.config['COMPILERS'].items():
            if extension in extensions:
                content_format = compiler
        if not content_format:
            LOGGER.error("Unknown {0} extension {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, extension))
            return
    elif not content_format and import_file:
        # content_format not specified. If import_file was given, use
        # it to guess (Issue #2798)
        extension = os.path.splitext(import_file)[-1]
        for compiler, extensions in self.site.config['COMPILERS'].items():
            if extension in extensions:
                content_format = compiler
        if not content_format:
            LOGGER.error("Unknown {0} extension {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, extension))
            return
    elif not content_format:
        # Issue #400
        content_format = get_default_compiler(
            is_post,
            self.site.config['COMPILERS'],
            self.site.config['post_pages'])
    elif content_format not in compiler_names:
        LOGGER.error("Unknown {0} format {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, content_format))
        self.print_compilers()
        return

    compiler_plugin = self.site.plugin_manager.getPluginByName(
        content_format, "PageCompiler").plugin_object

    # Guess where we should put this
    entry = self.filter_post_pages(content_format, is_post)

    if entry is False:
        return 1

    if import_file:
        print("Importing Existing {xx}".format(xx=content_type.title()))
        print("-----------------------\n")
    else:
        print("Creating New {xx}".format(xx=content_type.title()))
        print("-----------------\n")
    if title is not None:
        print("Title:", title)
    else:
        while not title:
            title = utils.ask('Title')

    if isinstance(title, utils.bytes_str):
        try:
            title = title.decode(sys.stdin.encoding)
        except (AttributeError, TypeError):  # for tests
            title = title.decode('utf-8')

    title = title.strip()
    if not path:
        slug = utils.slugify(title, lang=self.site.default_lang)
    else:
        if isinstance(path, utils.bytes_str):
            try:
                path = path.decode(sys.stdin.encoding)
            except (AttributeError, TypeError):  # for tests
                path = path.decode('utf-8')
        if os.path.isdir(path):
            # If the user provides a directory, add the file name generated from title (Issue #2651)
            slug = utils.slugify(title, lang=self.site.default_lang)
            pattern = os.path.basename(entry[0])
            suffix = pattern[1:]
            path = os.path.join(path, slug + suffix)
        else:
            slug = utils.slugify(os.path.splitext(os.path.basename(path))[0],
                                 lang=self.site.default_lang)
    if isinstance(author, utils.bytes_str):
        try:
            author = author.decode(sys.stdin.encoding)
        except (AttributeError, TypeError):  # for tests
            author = author.decode('utf-8')

    # Calculate the date to use for the content
    # SCHEDULE_ALL is post-only (Issue #2921)
    schedule = options['schedule'] or (self.site.config['SCHEDULE_ALL'] and is_post)
    rule = self.site.config['SCHEDULE_RULE']
    self.site.scan_posts()
    timeline = self.site.timeline
    last_date = None if not timeline else timeline[0].date
    date, dateobj = get_date(schedule, rule, last_date, self.site.tzinfo,
                             self.site.config['FORCE_ISO8601'])
    data = {
        'title': title,
        'slug': slug,
        'date': date,
        'tags': tags,
        'link': '',
        'description': '',
        'type': 'text',
    }

    if not path:
        pattern = os.path.basename(entry[0])
        suffix = pattern[1:]
        output_path = os.path.dirname(entry[0])
        if date_path_auto or date_path_opt:
            output_path += os.sep + dateobj.strftime(date_path_format)
        txt_path = os.path.join(output_path, slug + suffix)
        meta_path = os.path.join(output_path, slug + ".meta")
    else:
        if date_path_opt:
            LOGGER.warning("A path has been specified, ignoring -d")
        txt_path = os.path.join(self.site.original_cwd, path)
        meta_path = os.path.splitext(txt_path)[0] + ".meta"

    if (not onefile and os.path.isfile(meta_path)) or \
            os.path.isfile(txt_path):
        # Emit an event when a post exists
        event = dict(path=txt_path)
        if not onefile:  # write metadata file
            event['meta_path'] = meta_path
        signal('existing_' + content_type).send(self, **event)

        LOGGER.error("The title already exists!")
        LOGGER.info("Existing {0}'s text is at: {1}".format(content_type, txt_path))
        if not onefile:
            LOGGER.info("Existing {0}'s metadata is at: {1}".format(content_type, meta_path))
        return 8

    d_name = os.path.dirname(txt_path)
    utils.makedirs(d_name)
    metadata = {}
    if author:
        metadata['author'] = author
    metadata.update(self.site.config['ADDITIONAL_METADATA'])
    data.update(metadata)

    # ipynb plugin needs the Jupyter kernel info. We get the kernel name
    # from the content_subformat and pass it to the compiler in the metadata
    if content_format == "ipynb" and content_subformat is not None:
        metadata["jupyter_kernel"] = content_subformat

    # Override onefile if not really supported.
    if not compiler_plugin.supports_onefile and onefile:
        onefile = False
        LOGGER.warning('This compiler does not support one-file posts.')

    if onefile and import_file:
        with io.open(import_file, 'r', encoding='utf-8') as fh:
            content = fh.read()
    elif not import_file:
        if is_page:
            content = self.site.MESSAGES[self.site.default_lang]["Write your page here."]
        else:
            content = self.site.MESSAGES[self.site.default_lang]["Write your post here."]

    if (not onefile) and import_file:
        # Two-file posts are copied on import (Issue #2380)
        shutil.copy(import_file, txt_path)
    else:
        compiler_plugin.create_post(
            txt_path, content=content, onefile=onefile, title=title,
            slug=slug, date=date, tags=tags, is_page=is_page, **metadata)

    event = dict(path=txt_path)

    if not onefile:  # write metadata file
        with io.open(meta_path, "w+", encoding="utf8") as fd:
            fd.write(utils.write_metadata(data, comment_wrap=False, site=self.site))
        LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path))
        event['meta_path'] = meta_path
    LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path))

    signal('new_' + content_type).send(self, **event)

    if options['edit']:
        editor = os.getenv('EDITOR', '').split()
        to_run = editor + [txt_path]
        if not onefile:
            to_run.append(meta_path)
        if editor:
            subprocess.call(to_run)
        else:
            LOGGER.error('The $EDITOR environment variable is not set, cannot edit the post with \'-e\'. Please edit the post manually.')
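# Typical invocations handled by the command above (illustrative only; run
# `nikola new_post --help` for the authoritative option list, as the short
# flags shown here are assumptions):
#
#   nikola new_post -t "Hello, world!" -f markdown   # one-file markdown post
#   nikola new_post -2 -t "Two-file post"            # metadata in a .meta file
#   nikola new_post -i draft.rst                     # import an existing file
#   nikola new_post -e                               # open $EDITOR afterwards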
def test_write_metadata_pelican_detection(post, post_format, expected_metadata):
    post.name = post_format
    data = {"title": "xx"}
    assert write_metadata(data, "pelican", compiler=post) == expected_metadata
def test_write_metadata_comment_wrap(wrap, expected_result):
    data = {"title": "Hello, world!", "slug": "hello-world"}
    assert write_metadata(data, "nikola", wrap) == expected_result
def test_write_metadata_from_site(post):
    post.config = {"METADATA_FORMAT": "yaml"}
    data = {"title": "xx"}
    assert write_metadata(data, site=post) == "---\ntitle: xx\n---\n"
def test_write_metadata_pelican_detection_default():
    data = {"title": "xx"}
    assert write_metadata(data, "pelican", compiler=None) == ".. title: xx\n\n"
def test_write_metadata_fallbacks(post, arg):
    data = {"title": "xx"}
    assert write_metadata(data, arg) == ".. title: xx\n\n"
def test_write_metadata_default(post):
    data = {"title": "xx"}
    assert write_metadata(data) == ".. title: xx\n\n"
def _serialize(self, metadata, doc, is_html):
    header = utils.write_metadata(metadata)
    pattern = '<!--\n{0}\n-->\n\n{1}' if is_html else '{0}\n\n{1}'
    return pattern.format(header.strip(), doc)
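# For illustration (a sketch; the surrounding importer class is not shown):
# with metadata {'title': 'xx'}, the two branches of _serialize above yield
#
#   is_html=True  ->  "<!--\n.. title: xx\n-->\n\n<p>body</p>"
#   is_html=False ->  ".. title: xx\n\nbody"
#
# i.e. HTML documents get the metadata wrapped in an HTML comment, while
# everything else gets it as a plain header block separated by a blank line.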
def _execute(self, options, args): """Create a new post or page.""" global LOGGER compiler_names = [p.name for p in self.site.plugin_manager.getPluginsOfCategory("PageCompiler")] if len(args) > 1: print(self.help()) return False elif args: path = args[0] else: path = None # Even though stuff was split into `new_page`, it’s easier to do it # here not to duplicate the code. is_page = options.get("is_page", False) is_post = not is_page content_type = "page" if is_page else "post" title = options["title"] or None author = options["author"] or "" tags = options["tags"] onefile = options["onefile"] twofile = options["twofile"] import_file = options["import"] wants_available = options["available-formats"] if wants_available: self.print_compilers() return if is_page: LOGGER = PAGELOGGER else: LOGGER = POSTLOGGER if twofile: onefile = False if not onefile and not twofile: onefile = self.site.config.get("ONE_FILE_POSTS", True) content_format = options["content_format"] content_subformat = None if "@" in content_format: content_format, content_subformat = content_format.split("@") if not content_format: # Issue #400 content_format = get_default_compiler( is_post, self.site.config["COMPILERS"], self.site.config["post_pages"] ) if content_format not in compiler_names: LOGGER.error( "Unknown {0} format {1}, maybe you need to install a plugin?".format(content_type, content_format) ) self.print_compilers() return compiler_plugin = self.site.plugin_manager.getPluginByName(content_format, "PageCompiler").plugin_object # Guess where we should put this entry = self.filter_post_pages(content_format, is_post) if entry is False: return 1 if import_file: print("Importing Existing {xx}".format(xx=content_type.title())) print("-----------------------\n") else: print("Creating New {xx}".format(xx=content_type.title())) print("-----------------\n") if title is not None: print("Title:", title) else: while not title: title = utils.ask("Title") if isinstance(title, utils.bytes_str): try: title = title.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests title = title.decode("utf-8") title = title.strip() if not path: slug = utils.slugify(title) else: if isinstance(path, utils.bytes_str): try: path = path.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests path = path.decode("utf-8") slug = utils.slugify(os.path.splitext(os.path.basename(path))[0]) if isinstance(author, utils.bytes_str): try: author = author.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests author = author.decode("utf-8") # Calculate the date to use for the content schedule = options["schedule"] or self.site.config["SCHEDULE_ALL"] rule = self.site.config["SCHEDULE_RULE"] self.site.scan_posts() timeline = self.site.timeline last_date = None if not timeline else timeline[0].date date = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config["FORCE_ISO8601"]) data = {"title": title, "slug": slug, "date": date, "tags": tags, "link": "", "description": "", "type": "text"} output_path = os.path.dirname(entry[0]) meta_path = os.path.join(output_path, slug + ".meta") pattern = os.path.basename(entry[0]) suffix = pattern[1:] if not path: txt_path = os.path.join(output_path, slug + suffix) else: txt_path = os.path.join(self.site.original_cwd, path) if (not onefile and os.path.isfile(meta_path)) or os.path.isfile(txt_path): # Emit an event when a post exists event = dict(path=txt_path) if not onefile: # write metadata file event["meta_path"] = meta_path signal("existing_" + 
content_type).send(self, **event) LOGGER.error("The title already exists!") return 8 d_name = os.path.dirname(txt_path) utils.makedirs(d_name) metadata = {} if author: metadata["author"] = author metadata.update(self.site.config["ADDITIONAL_METADATA"]) data.update(metadata) # ipynb plugin needs the ipython kernel info. We get the kernel name # from the content_subformat and pass it to the compiler in the metadata if content_format == "ipynb" and content_subformat is not None: metadata["ipython_kernel"] = content_subformat # Override onefile if not really supported. if not compiler_plugin.supports_onefile and onefile: onefile = False LOGGER.warn("This compiler does not support one-file posts.") if import_file: with io.open(import_file, "r", encoding="utf-8") as fh: content = fh.read() else: if is_page: content = self.site.MESSAGES[self.site.default_lang]["Write your page here."] else: content = self.site.MESSAGES[self.site.default_lang]["Write your post here."] compiler_plugin.create_post( txt_path, content=content, onefile=onefile, title=title, slug=slug, date=date, tags=tags, is_page=is_page, **metadata ) event = dict(path=txt_path) if not onefile: # write metadata file with io.open(meta_path, "w+", encoding="utf8") as fd: fd.write(utils.write_metadata(data)) LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path)) event["meta_path"] = meta_path LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path)) signal("new_" + content_type).send(self, **event) if options["edit"]: editor = os.getenv("EDITOR", "").split() to_run = editor + [txt_path] if not onefile: to_run.append(meta_path) if editor: subprocess.call(to_run) else: LOGGER.error("$EDITOR not set, cannot edit the post. Please do it manually.")
def _execute(self, options, args): """Create a new post or page.""" global LOGGER compiler_names = [p.name for p in self.site.plugin_manager.getPluginsOfCategory( "PageCompiler")] if len(args) > 1: print(self.help()) return False elif args: path = args[0] else: path = None # Even though stuff was split into `new_page`, it’s easier to do it # here not to duplicate the code. is_page = options.get('is_page', False) is_post = not is_page content_type = 'page' if is_page else 'post' title = options['title'] or None tags = options['tags'] onefile = options['onefile'] twofile = options['twofile'] if is_page: LOGGER = PAGELOGGER else: LOGGER = POSTLOGGER if twofile: onefile = False if not onefile and not twofile: onefile = self.site.config.get('ONE_FILE_POSTS', True) content_format = options['content_format'] if not content_format: # Issue #400 content_format = get_default_compiler( is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if content_format not in compiler_names: LOGGER.error("Unknown {0} format {1}".format(content_type, content_format)) return compiler_plugin = self.site.plugin_manager.getPluginByName( content_format, "PageCompiler").plugin_object # Guess where we should put this entry = filter_post_pages(content_format, is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) print("Creating New {0}".format(content_type.title())) print("-----------------\n") if title is None: inpf = raw_input if sys.version_info[0] == 2 else input title = inpf("Enter title: ").strip() else: print("Title:", title) if isinstance(title, utils.bytes_str): try: title = title.decode(sys.stdin.encoding) except AttributeError: # for tests title = title.decode('utf-8') title = title.strip() if not path: slug = utils.slugify(title) else: if isinstance(path, utils.bytes_str): try: path = path.decode(sys.stdin.encoding) except AttributeError: # for tests path = path.decode('utf-8') slug = utils.slugify(os.path.splitext(os.path.basename(path))[0]) # Calculate the date to use for the content schedule = options['schedule'] or self.site.config['SCHEDULE_ALL'] rule = self.site.config['SCHEDULE_RULE'] force_today = self.site.config['SCHEDULE_FORCE_TODAY'] self.site.scan_posts() timeline = self.site.timeline last_date = None if not timeline else timeline[0].date date = get_date(schedule, rule, last_date, force_today, self.site.tzinfo, self.site.config['FORCE_ISO8601']) data = { 'title': title, 'slug': slug, 'date': date, 'tags': tags, 'link': '', 'description': '', 'type': 'text', } output_path = os.path.dirname(entry[0]) meta_path = os.path.join(output_path, slug + ".meta") pattern = os.path.basename(entry[0]) suffix = pattern[1:] if not path: txt_path = os.path.join(output_path, slug + suffix) else: txt_path = path if (not onefile and os.path.isfile(meta_path)) or \ os.path.isfile(txt_path): LOGGER.error("The title already exists!") exit() d_name = os.path.dirname(txt_path) utils.makedirs(d_name) metadata = self.site.config['ADDITIONAL_METADATA'] # Override onefile if not really supported. 
if not compiler_plugin.supports_onefile and onefile: onefile = False LOGGER.warn('This compiler does not support one-file posts.') content = "Write your {0} here.".format('page' if is_page else 'post') compiler_plugin.create_post( txt_path, content=content, onefile=onefile, title=title, slug=slug, date=date, tags=tags, is_page=is_page, **metadata) event = dict(path=txt_path) if not onefile: # write metadata file with codecs.open(meta_path, "wb+", "utf8") as fd: fd.write(utils.write_metadata(data)) with codecs.open(txt_path, "wb+", "utf8") as fd: fd.write("Write your {0} here.".format(content_type)) LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path)) event['meta_path'] = meta_path LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path)) signal('new_' + content_type).send(self, **event)
def _execute(self, options, args): """Create a new post or page.""" global LOGGER compiler_names = [p.name for p in self.site.plugin_manager.getPluginsOfCategory( "PageCompiler")] if len(args) > 1: print(self.help()) return False elif args: path = args[0] else: path = None # Even though stuff was split into `new_page`, it’s easier to do it # here not to duplicate the code. is_page = options.get('is_page', False) is_post = not is_page content_type = 'page' if is_page else 'post' title = options['title'] or None author = options['author'] or '' tags = options['tags'] onefile = options['onefile'] twofile = options['twofile'] import_file = options['import'] wants_available = options['available-formats'] date_path_opt = options['date-path'] date_path_auto = self.site.config['NEW_POST_DATE_PATH'] date_path_format = self.site.config['NEW_POST_DATE_PATH_FORMAT'].strip('/') if wants_available: self.print_compilers() return if is_page: LOGGER = PAGELOGGER else: LOGGER = POSTLOGGER if twofile: onefile = False if not onefile and not twofile: onefile = self.site.config.get('ONE_FILE_POSTS', True) content_format = options['content_format'] content_subformat = None if "@" in content_format: content_format, content_subformat = content_format.split("@") if not content_format: # Issue #400 content_format = get_default_compiler( is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if content_format not in compiler_names: LOGGER.error("Unknown {0} format {1}, maybe you need to install a plugin or enable an existing one?".format(content_type, content_format)) self.print_compilers() return compiler_plugin = self.site.plugin_manager.getPluginByName( content_format, "PageCompiler").plugin_object # Guess where we should put this entry = self.filter_post_pages(content_format, is_post) if entry is False: return 1 if import_file: print("Importing Existing {xx}".format(xx=content_type.title())) print("-----------------------\n") else: print("Creating New {xx}".format(xx=content_type.title())) print("-----------------\n") if title is not None: print("Title:", title) else: while not title: title = utils.ask('Title') if isinstance(title, utils.bytes_str): try: title = title.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests title = title.decode('utf-8') title = title.strip() if not path: slug = utils.slugify(title, lang=self.site.default_lang) else: if isinstance(path, utils.bytes_str): try: path = path.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests path = path.decode('utf-8') slug = utils.slugify(os.path.splitext(os.path.basename(path))[0], lang=self.site.default_lang) if isinstance(author, utils.bytes_str): try: author = author.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests author = author.decode('utf-8') # Calculate the date to use for the content schedule = options['schedule'] or self.site.config['SCHEDULE_ALL'] rule = self.site.config['SCHEDULE_RULE'] self.site.scan_posts() timeline = self.site.timeline last_date = None if not timeline else timeline[0].date date, dateobj = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config['FORCE_ISO8601']) data = { 'title': title, 'slug': slug, 'date': date, 'tags': tags, 'link': '', 'description': '', 'type': 'text', } if not path: pattern = os.path.basename(entry[0]) suffix = pattern[1:] output_path = os.path.dirname(entry[0]) if date_path_auto or date_path_opt: output_path += os.sep + dateobj.strftime(date_path_format) txt_path = os.path.join(output_path, slug + 
suffix) meta_path = os.path.join(output_path, slug + ".meta") else: if date_path_opt: LOGGER.warn("A path has been specified, ignoring -d") txt_path = os.path.join(self.site.original_cwd, path) meta_path = os.path.splitext(txt_path)[0] + ".meta" if (not onefile and os.path.isfile(meta_path)) or \ os.path.isfile(txt_path): # Emit an event when a post exists event = dict(path=txt_path) if not onefile: # write metadata file event['meta_path'] = meta_path signal('existing_' + content_type).send(self, **event) LOGGER.error("The title already exists!") LOGGER.info("Existing {0}'s text is at: {1}".format(content_type, txt_path)) if not onefile: LOGGER.info("Existing {0}'s metadata is at: {1}".format(content_type, meta_path)) return 8 d_name = os.path.dirname(txt_path) utils.makedirs(d_name) metadata = {} if author: metadata['author'] = author metadata.update(self.site.config['ADDITIONAL_METADATA']) data.update(metadata) # ipynb plugin needs the ipython kernel info. We get the kernel name # from the content_subformat and pass it to the compiler in the metadata if content_format == "ipynb" and content_subformat is not None: metadata["ipython_kernel"] = content_subformat # Override onefile if not really supported. if not compiler_plugin.supports_onefile and onefile: onefile = False LOGGER.warn('This compiler does not support one-file posts.') if onefile and import_file: with io.open(import_file, 'r', encoding='utf-8') as fh: content = fh.read() elif not import_file: if is_page: content = self.site.MESSAGES[self.site.default_lang]["Write your page here."] else: content = self.site.MESSAGES[self.site.default_lang]["Write your post here."] if (not onefile) and import_file: # Two-file posts are copied on import (Issue #2380) shutil.copy(import_file, txt_path) else: compiler_plugin.create_post( txt_path, content=content, onefile=onefile, title=title, slug=slug, date=date, tags=tags, is_page=is_page, **metadata) event = dict(path=txt_path) if not onefile: # write metadata file with io.open(meta_path, "w+", encoding="utf8") as fd: fd.write(utils.write_metadata(data)) LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path)) event['meta_path'] = meta_path LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path)) signal('new_' + content_type).send(self, **event) if options['edit']: editor = os.getenv('EDITOR', '').split() to_run = editor + [txt_path] if not onefile: to_run.append(meta_path) if editor: subprocess.call(to_run) else: LOGGER.error('$EDITOR not set, cannot edit the post. Please do it manually.')
def _execute(self, options, args): """Create a new post or page.""" global LOGGER compiler_names = [ p.name for p in self.site.plugin_manager.getPluginsOfCategory( "PageCompiler") ] if len(args) > 1: print(self.help()) return False elif args: path = args[0] else: path = None # Even though stuff was split into `new_page`, it’s easier to do it # here not to duplicate the code. is_page = options.get('is_page', False) is_post = not is_page content_type = 'page' if is_page else 'post' title = options['title'] or None author = options['author'] or '' tags = options['tags'] onefile = options['onefile'] twofile = options['twofile'] import_file = options['import'] if is_page: LOGGER = PAGELOGGER else: LOGGER = POSTLOGGER if twofile: onefile = False if not onefile and not twofile: onefile = self.site.config.get('ONE_FILE_POSTS', True) content_format = options['content_format'] if not content_format: # Issue #400 content_format = get_default_compiler( is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if content_format not in compiler_names: LOGGER.error("Unknown {0} format {1}".format( content_type, content_format)) return compiler_plugin = self.site.plugin_manager.getPluginByName( content_format, "PageCompiler").plugin_object # Guess where we should put this entry = filter_post_pages(content_format, is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if import_file: print("Importing Existing {xx}".format(xx=content_type.title())) print("-----------------------\n") else: print("Creating New {xx}".format(xx=content_type.title())) print("-----------------\n") if title is not None: print("Title:", title) else: while not title: title = utils.ask('Title') if isinstance(title, utils.bytes_str): try: title = title.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests title = title.decode('utf-8') title = title.strip() if not path: slug = utils.slugify(title) else: if isinstance(path, utils.bytes_str): try: path = path.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests path = path.decode('utf-8') slug = utils.slugify(os.path.splitext(os.path.basename(path))[0]) if isinstance(author, utils.bytes_str): try: author = author.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests author = author.decode('utf-8') # Calculate the date to use for the content schedule = options['schedule'] or self.site.config['SCHEDULE_ALL'] rule = self.site.config['SCHEDULE_RULE'] self.site.scan_posts() timeline = self.site.timeline last_date = None if not timeline else timeline[0].date date = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config['FORCE_ISO8601']) data = { 'title': title, 'slug': slug, 'date': date, 'tags': tags, 'link': '', 'description': '', 'type': 'text', } output_path = os.path.dirname(entry[0]) meta_path = os.path.join(output_path, slug + ".meta") pattern = os.path.basename(entry[0]) suffix = pattern[1:] if not path: txt_path = os.path.join(output_path, slug + suffix) else: txt_path = path if (not onefile and os.path.isfile(meta_path)) or \ os.path.isfile(txt_path): LOGGER.error("The title already exists!") exit(8) d_name = os.path.dirname(txt_path) utils.makedirs(d_name) metadata = {} if author: metadata['author'] = author metadata.update(self.site.config['ADDITIONAL_METADATA']) data.update(metadata) # Override onefile if not really supported. 
if not compiler_plugin.supports_onefile and onefile: onefile = False LOGGER.warn('This compiler does not support one-file posts.') if import_file: with io.open(import_file, 'r', encoding='utf-8') as fh: content = fh.read() else: # ipynb's create_post depends on this exact string, take care # if you're changing it content = "Write your {0} here.".format( 'page' if is_page else 'post') compiler_plugin.create_post(txt_path, content=content, onefile=onefile, title=title, slug=slug, date=date, tags=tags, is_page=is_page, **metadata) event = dict(path=txt_path) if not onefile: # write metadata file with io.open(meta_path, "w+", encoding="utf8") as fd: fd.write(utils.write_metadata(data)) LOGGER.info("Your {0}'s metadata is at: {1}".format( content_type, meta_path)) event['meta_path'] = meta_path LOGGER.info("Your {0}'s text is at: {1}".format( content_type, txt_path)) signal('new_' + content_type).send(self, **event) if options['edit']: editor = os.getenv('EDITOR', '').split() to_run = editor + [txt_path] if not onefile: to_run.append(meta_path) if editor: subprocess.call(to_run) else: LOGGER.error( '$EDITOR not set, cannot edit the post. Please do it manually.' )
def _execute(self, options, args): """Create a new post or page.""" global LOGGER compiler_names = [p.name for p in self.site.plugin_manager.getPluginsOfCategory( "PageCompiler")] if len(args) > 1: print(self.help()) return False elif args: path = args[0] else: path = None # Even though stuff was split into `new_page`, it’s easier to do it # here not to duplicate the code. is_page = options.get('is_page', False) is_post = not is_page content_type = 'page' if is_page else 'post' title = options['title'] or None author = options['author'] or '' tags = options['tags'] onefile = options['onefile'] twofile = options['twofile'] import_file = options['import'] if is_page: LOGGER = PAGELOGGER else: LOGGER = POSTLOGGER if twofile: onefile = False if not onefile and not twofile: onefile = self.site.config.get('ONE_FILE_POSTS', True) content_format = options['content_format'] if not content_format: # Issue #400 content_format = get_default_compiler( is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if content_format not in compiler_names: LOGGER.error("Unknown {0} format {1}".format(content_type, content_format)) return compiler_plugin = self.site.plugin_manager.getPluginByName( content_format, "PageCompiler").plugin_object # Guess where we should put this entry = filter_post_pages(content_format, is_post, self.site.config['COMPILERS'], self.site.config['post_pages']) if import_file: print("Importing Existing {xx}".format(xx=content_type.title())) print("-----------------------\n") else: print("Creating New {xx}".format(xx=content_type.title())) print("-----------------\n") if title is not None: print("Title:", title) else: while not title: title = utils.ask('Title') if isinstance(title, utils.bytes_str): try: title = title.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests title = title.decode('utf-8') title = title.strip() if not path: slug = utils.slugify(title) else: if isinstance(path, utils.bytes_str): try: path = path.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests path = path.decode('utf-8') slug = utils.slugify(os.path.splitext(os.path.basename(path))[0]) if isinstance(author, utils.bytes_str): try: author = author.decode(sys.stdin.encoding) except (AttributeError, TypeError): # for tests author = author.decode('utf-8') # Calculate the date to use for the content schedule = options['schedule'] or self.site.config['SCHEDULE_ALL'] rule = self.site.config['SCHEDULE_RULE'] self.site.scan_posts() timeline = self.site.timeline last_date = None if not timeline else timeline[0].date date = get_date(schedule, rule, last_date, self.site.tzinfo, self.site.config['FORCE_ISO8601']) data = { 'title': title, 'slug': slug, 'date': date, 'tags': tags, 'link': '', 'description': '', 'type': 'text', } output_path = os.path.dirname(entry[0]) meta_path = os.path.join(output_path, slug + ".meta") pattern = os.path.basename(entry[0]) suffix = pattern[1:] if not path: txt_path = os.path.join(output_path, slug + suffix) else: txt_path = path if (not onefile and os.path.isfile(meta_path)) or \ os.path.isfile(txt_path): LOGGER.error("The title already exists!") exit(8) d_name = os.path.dirname(txt_path) utils.makedirs(d_name) metadata = {} if author: metadata['author'] = author metadata.update(self.site.config['ADDITIONAL_METADATA']) data.update(metadata) # Override onefile if not really supported. 
if not compiler_plugin.supports_onefile and onefile: onefile = False LOGGER.warn('This compiler does not support one-file posts.') if import_file: with io.open(import_file, 'r', encoding='utf-8') as fh: content = fh.read() else: # ipynb's create_post depends on this exact string, take care # if you're changing it content = "Write your {0} here.".format('page' if is_page else 'post') compiler_plugin.create_post( txt_path, content=content, onefile=onefile, title=title, slug=slug, date=date, tags=tags, is_page=is_page, **metadata) event = dict(path=txt_path) if not onefile: # write metadata file with io.open(meta_path, "w+", encoding="utf8") as fd: fd.write(utils.write_metadata(data)) LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path)) event['meta_path'] = meta_path LOGGER.info("Your {0}'s text is at: {1}".format(content_type, txt_path)) signal('new_' + content_type).send(self, **event) if options['edit']: editor = os.getenv('EDITOR', '').split() to_run = editor + [txt_path] if not onefile: to_run.append(meta_path) if editor: subprocess.call(to_run) else: LOGGER.error('$EDITOR not set, cannot edit the post. Please do it manually.')
def edit(path):
    """Edit a post.

    If requested over GET, shows the edit UI.
    If requested over POST, saves the post and shows the edit UI.

    :param path: Path to post to edit.
    """
    context = {'path': path, 'site': site}
    post = find_post(path)
    if post is None:
        return error("No such post or page.", 404)

    current_auid = int(post.meta('author.uid') or current_user.uid)
    if (not current_user.can_edit_all_posts and current_auid != current_user.uid):
        return error("Cannot edit posts of other users.", 401)

    if request.method == 'POST':
        meta = {}
        for k, v in request.form.items():
            meta[k] = v
        meta.pop('_wysihtml5_mode', '')

        try:
            meta['author'] = get_user(meta['author.uid']).realname
            current_auid = int(meta['author.uid'])
            author_change_success = True
        except Exception:
            author_change_success = False
        if (not current_user.can_transfer_post_authorship or not author_change_success):
            meta['author'] = post.meta('author') or current_user.realname
            meta['author.uid'] = str(current_auid)

        twofile = post.is_two_file
        onefile = not twofile
        post.compiler.create_post(post.source_path, onefile=onefile,
                                  is_page=False, **meta)

        context['post_content'] = meta['content']

        if twofile:
            meta_path = os.path.splitext(path)[0] + '.meta'
            # We cannot save `content` as meta, otherwise things break badly
            meta.pop('content', '')
            with io.open(meta_path, 'w+', encoding='utf-8') as fh:
                fh.write(write_metadata(meta))

        scan_site()
        if db is not None:
            db.set('site:needs_rebuild', '1')
        else:
            site.coil_needs_rebuild = '1'
        post = find_post(path)
        context['action'] = 'save'
    else:
        context['action'] = 'edit'
        with io.open(path, 'r', encoding='utf-8') as fh:
            context['post_content'] = fh.read()
            if not post.is_two_file:
                context['post_content'] = context['post_content'].split('\n\n', 1)[1]

    context['post'] = post
    users = []
    if db is not None:
        uids = db.hgetall('users').values()
        for u in uids:
            u = u.decode('utf-8')
            realname, active = db.hmget('user:{0}'.format(u), 'realname', 'active')
            if active in (u'1', b'1'):
                users.append((u, realname.decode('utf-8')))
    else:
        for u, d in app.config['COIL_USERS'].items():
            if d['active']:
                users.append((int(u), d['realname']))
    context['users'] = sorted(users)
    context['current_auid'] = current_auid
    context['title'] = 'Editing {0}'.format(post.title())
    context['is_html'] = post.compiler.name == 'html'
    return render('coil_post_edit.tmpl', context)
def _execute(self, options, args):
    L = utils.get_logger('upgrade_metadata_v8', utils.STDERR_HANDLER)
    if not self.site.config['USE_TAG_METADATA']:
        L.error('This plugin can only be used if USE_TAG_METADATA is set to True.')
        sys.exit(-1)
    self.site.config['WARN_ABOUT_TAG_METADATA'] = False

    # scan posts
    self.site.scan_posts()
    flagged = []
    for post in self.site.timeline:
        flag = False
        if post.has_oldstyle_metadata_tags:
            flag = True
        for lang in self.site.config['TRANSLATIONS'].keys():
            if 'section' in post.meta[lang]:
                flag = True
        if flag:
            flagged.append(post)
    if flagged:
        if len(flagged) == 1:
            L.info('1 post (and/or its translations) contains old-style metadata or has section metadata:')
        else:
            L.info('{0} posts (and/or their translations) contain old-style metadata or have section metadata:'.format(len(flagged)))
        for post in flagged:
            L.info(' ' + (post.metadata_path if post.is_two_file else post.source_path))
        L.warn('Please make a backup before running this plugin. It might eat your data.')
        if not options['yes']:
            yesno = utils.ask_yesno("Proceed with metadata upgrade?")
        if options['yes'] or yesno:
            number_converted = 0
            number_converted_partial = 0
            for post in flagged:
                converted = False
                fully_converted = True
                for lang in self.site.config['TRANSLATIONS'].keys():
                    # Get file names and extractor
                    extractor = post.used_extractor[lang]
                    is_two_file = post.is_two_file
                    if lang == post.default_lang:
                        fname = post.metadata_path if is_two_file else post.source_path
                    else:
                        meta_path = os.path.splitext(post.source_path)[0] + '.meta' if is_two_file else post.source_path
                        fname = utils.get_translation_candidate(post.config, meta_path, lang)

                    # We don't handle compilers which extract metadata for now
                    if post.compiler is extractor:
                        L.warn('Cannot convert {0} (language {1}), as metadata was extracted by compiler.'.format(fname, lang))
                        fully_converted = False
                        continue

                    # Read metadata and text from post file
                    if not os.path.exists(fname):
                        L.debug("File {0} does not exist, skipping.".format(fname))
                        continue
                    with io.open(fname, "r", encoding="utf-8-sig") as meta_file:
                        source_text = meta_file.read()
                    if not is_two_file:
                        _, content_str = extractor.split_metadata_from_text(source_text)
                    meta = extractor.extract_text(source_text)

                    # Consider metadata mappings
                    sources = {}
                    for m in ('tags', 'status', 'has_math', 'section', 'category'):
                        sources[m] = m
                    for foreign, ours in self.site.config.get('METADATA_MAPPING', {}).get(extractor.map_from, {}).items():
                        if ours in sources:
                            sources[ours] = foreign
                    for meta_key, hook in self.site.config.get('METADATA_VALUE_MAPPING', {}).get(extractor.map_from, {}).items():
                        if meta_key in sources.values():
                            L.warn('Cannot convert {0} (language {1}): a metadata value mapping is defined for "{2}"!'.format(fname, lang, meta_key))

                    # Update metadata
                    updated = False
                    tags = meta.get(sources['tags'], [])
                    tags_are_string = False
                    if not isinstance(tags, list):
                        tags_are_string = True
                        tags = [tag.strip() for tag in tags.split(',') if tag.strip()]
                    if 'draft' in [_.lower() for _ in tags]:
                        tags.remove('draft')
                        meta[sources['status']] = 'draft'
                        updated = True
                    if 'private' in tags:
                        tags.remove('private')
                        meta[sources['status']] = 'private'
                        updated = True
                    if 'mathjax' in tags:
                        tags.remove('mathjax')
                        meta[sources['has_math']] = 'yes'
                        updated = True
                    if meta.get(sources['section']):
                        if meta.get(sources['category']):
                            L.warn('Cannot completely convert {0} (language {1}): both section and category are specified. Please determine the correct category to use yourself!'.format(fname, lang))
                            fully_converted = False
                        else:
                            meta[sources['category']] = meta[sources['section']]
                            del meta[sources['section']]
                            updated = True
                    if tags_are_string:
                        meta[sources['tags']] = ', '.join(tags)

                    if not updated:
                        # Nothing to do (but successful)!
                        converted = True
                        continue

                    # Recombine metadata with post text if necessary, and write back to file
                    meta_str = utils.write_metadata(meta, metadata_format=extractor.name,
                                                    compiler=post.compiler,
                                                    comment_wrap=(post.compiler.name != 'rest'),
                                                    site=self.site)
                    final_str = meta_str if is_two_file else (meta_str + content_str)
                    with io.open(fname, "w", encoding="utf-8") as meta_file:
                        meta_file.write(final_str)
                    converted = True

                if converted:
                    if fully_converted:
                        number_converted += 1
                    else:
                        number_converted_partial += 1

            L.info('{0} out of {2} posts upgraded; {1} only converted partially '
                   '(see above output).'.format(number_converted + number_converted_partial,
                                                number_converted_partial, len(flagged)))
        else:
            L.info('Metadata not upgraded.')
    else:
        L.info('No posts found with special tags or section metadata. No action is required.')
        L.info('You can safely set the USE_TAG_METADATA and the WARN_ABOUT_TAG_METADATA settings to False.')
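# A condensed illustration of the mapping applied by the upgrade plugin above
# (a sketch; the field names follow the `sources` dict used in the code):
#
#   before: {'tags': 'draft, mathjax, python', 'section': 'projects'}
#   after:  {'tags': 'python', 'status': 'draft', 'has_math': 'yes',
#            'category': 'projects'}
#
# i.e. the special tags `draft`, `private` and `mathjax` become explicit
# `status`/`has_math` metadata, and `section` is renamed to `category`
# (unless a category already exists, in which case the post is only
# partially converted and flagged in the log).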