def hyphenate(dom, _lang):
    """Hyphenate a post's DOM in place and return it.

    Looks up a pyphen locale for ``_lang`` and inserts soft hyphens into
    the text of <p>, <li> and <span> nodes, skipping code/math-like
    content that must never be hyphenated.
    """
    # circular import prevention
    from .nikola import LEGAL_VALUES
    lang = None
    if pyphen is not None:
        lang = LEGAL_VALUES['PYPHEN_LOCALES'].get(_lang, pyphen.language_fallback(_lang))
    else:
        utils.req_missing(['pyphen'], 'hyphenate texts', optional=True)
    hyphenator = None
    if pyphen is not None and lang is not None:
        # If pyphen does exist, we tell the user when configuring the site.
        # If it does not support a language, we ignore it quietly.
        try:
            hyphenator = pyphen.Pyphen(lang=lang)
        except KeyError:
            # Fixed typo in the user-facing message: "dictoniaries".
            LOGGER.error("Cannot find hyphenation dictionaries for {0} (from {1}).".format(lang, _lang))
            LOGGER.error("Pyphen cannot be installed to ~/.local (pip install --user).")
    if hyphenator is not None:
        # Tags/classes whose content must not be hyphenated.
        # Hoisted out of the loops: it is loop-invariant.
        skippable_nodes = ['kbd', 'pre', 'code', 'samp', 'mark', 'math', 'data', 'ruby', 'svg']
        for tag in ('p', 'li', 'span'):
            for node in dom.xpath("//%s[not(parent::pre)]" % tag):
                skip_node = False
                if node.getchildren():
                    for child in node.getchildren():
                        if child.tag in skippable_nodes or (child.tag == 'span' and 'math' in child.get('class', [])):
                            skip_node = True
                elif 'math' in node.get('class', []):
                    skip_node = True
                if not skip_node:
                    insert_hyphens(node, hyphenator)
    return dom
def _execute(self, options, args):
    """Import an Atom/RSS feed into a new Nikola site."""
    if feedparser is None:
        req_missing(['feedparser'], 'import feeds')
        return
    if not options['url']:
        print(self.help())
        return
    self.feed_url = options['url']
    self.output_folder = options['output_folder']
    self.import_into_existing_site = False
    self.url_map = {}
    # Parse the feed and derive the site configuration from it.
    channel = self.get_channel_from_file(self.feed_url)
    self.context = self.populate_context(channel)
    conf_template = self.generate_base_site()
    self.context['REDIRECTIONS'] = self.configure_redirections(self.url_map)
    self.import_posts(channel)
    rendered_conf = conf_template.render(**prepare_config(self.context))
    self.write_configuration(self.get_configuration_output_path(), rendered_conf)
def compile_html(self, source, dest, is_two_file=True):
    """Compile source file into HTML via pandoc and save as dest.

    Shortcodes are applied to pandoc's output, and their dependencies are
    recorded on the post's depfile when the source file is registered.
    """
    makedirs(os.path.dirname(dest))
    try:
        try:
            post = self.site.post_per_input_file[source]
        except KeyError:
            # Source not registered as a post (e.g. stand-alone compile).
            post = None
        subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])
        with open(dest, 'r', encoding='utf-8') as inf:
            output, shortcode_deps = self.site.apply_shortcodes(
                inf.read(), with_dependencies=True)
        with open(dest, 'w', encoding='utf-8') as outf:
            outf.write(output)
        if post is None:
            if shortcode_deps:
                self.logger.error(
                    "Cannot save dependencies for post {0} due to unregistered source file name",
                    source)
        else:
            post._depfile[dest] += shortcode_deps
    except OSError as e:
        # Fixed typo: `e.strreror` raised AttributeError here, hiding the
        # "pandoc is missing" hint entirely.
        if e.strerror == 'No such file or directory':
            req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False)
def _execute(self, options, args):
    """Import a Blogger blog from an export file into a Nikola site."""
    # Parse the data.
    if feedparser is None:
        req_missing(['feedparser'], 'import Blogger dumps')
        return
    if not args:
        print(self.help())
        return
    options['filename'] = args[0]
    self.blogger_export_file = options['filename']
    self.output_folder = options['output_folder']
    self.import_into_existing_site = False
    self.exclude_drafts = options['exclude_drafts']
    self.url_map = {}
    channel = self.get_channel_from_file(self.blogger_export_file)
    self.context = self.populate_context(channel)
    conf_template = self.generate_base_site()
    self.context['REDIRECTIONS'] = self.configure_redirections(self.url_map)
    self.import_posts(channel)
    urlmap_path = os.path.join(self.output_folder, 'url_map.csv')
    self.write_urlmap_csv(urlmap_path, self.url_map)
    conf_out_path = self.get_configuration_output_path()
    # If this tracebacks, look at the comment in
    # basic_import.ImportMixin.generate_base_site.
    rendered_conf = conf_template.render(**prepare_config(self.context))
    self.write_configuration(conf_out_path, rendered_conf)
def gen_tasks(self):
    """Generate planetoid tasks, or an explanatory echo task when requirements are unmet."""
    if peewee is None or sys.version_info[0] == 3:
        if sys.version_info[0] == 3:
            message = 'Peewee, a requirement of the "planetoid" command, is currently incompatible with Python 3.'
        else:
            # Fixed: req_missing takes a list of requirement names,
            # as at every other call site in this codebase.
            req_missing(['peewee'], 'use the "planetoid" command')
            # req_missing already prints the explanation; echo nothing extra.
            message = ''
        yield {
            'basename': self.name,
            'name': '',
            'verbosity': 2,
            'actions': ['echo "%s"' % message]
        }
    else:
        self.init_db()
        self.load_feeds()
        for task in self.task_update_feeds():
            yield task
        for task in self.task_generate_posts():
            yield task
        # Umbrella task depending on the fetch/generate subtasks.
        yield {
            'basename': self.name,
            'name': '',
            'actions': [],
            'file_dep': ['feeds'],
            'task_dep': [
                self.name + "_fetch_feed",
                self.name + "_generate_posts",
            ]
        }
def compile_html(self, source, dest, is_two_file=True):
    """Compile source into HTML with the asciidoc CLI and save as dest."""
    makedirs(os.path.dirname(dest))
    try:
        subprocess.check_call(('asciidoc', '-f', 'html', '-s', '-o', dest, source))
    except OSError as e:
        # Fixed typo: `e.strreror` raised AttributeError, hiding the
        # "asciidoc is missing" hint.
        if e.strerror == 'No such file or directory':
            req_missing(['asciidoc'], 'build this site (compile with asciidoc)', python=False)
def plantuml_manager(self):
    """PlantUmlManager instance from the "plantuml" plugin (lazily resolved)."""
    if not self._plantuml_manager:
        plugin_info = self._site.plugin_manager.getPluginByName('plantuml', category='Task')
        if not plugin_info:
            req_missing("plantuml plugin", "use the plantuml_markdown plugin", python=False)
            # Early return: without it, the line below dereferenced None
            # and raised AttributeError instead of showing the message.
            return None
        self._plantuml_manager = plugin_info.plugin_object.plantuml_manager
    return self._plantuml_manager
def check_ghp_import_installed():
    """Verify the ghp-import console script can be executed."""
    probe = ['ghp-import', '-h']
    try:
        subprocess.check_output(probe)
    except OSError:
        # req_missing is deliberately left at its default `python=True`:
        # ghp-import comes from pip, but the only way to use it is by
        # running the console script it installs.
        req_missing(['ghp-import2'], 'deploy the site to GitHub Pages')
def get_json(self, url):
    """Return the parsed JSON at *url*, caching the result on the instance.

    Fixed: the original fetched and parsed the data but discarded the
    result — it neither stored it in ``self.json`` nor returned it.
    """
    if self.json is None:
        if requests is None:
            utils.req_missing(['requests'], 'install or list available plugins')
        data = requests.get(url).text
        self.json = json.loads(data)
    return self.json
def set_site(self, site):
    """Set Nikola site.

    On a configured site, fall back to not using bundles when the
    webassets package is unavailable, recording the decision in both
    the config and the global template context.
    """
    super(BuildBundles, self).set_site(site)
    if webassets is None:
        if site.configured and site.config['USE_BUNDLES']:
            utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)
            self.logger.warn('Setting USE_BUNDLES to False.')
            site.config['USE_BUNDLES'] = False
            site._GLOBAL_CONTEXT['use_bundles'] = False
def set_site(self, site):
    """Set Nikola site, turning bundles off when webassets is not installed."""
    self.logger = utils.get_logger('bundles', utils.STDERR_HANDLER)
    if webassets is None:
        if site.config['USE_BUNDLES']:
            utils.req_missing(['webassets'], 'USE_BUNDLES', optional=True)
            self.logger.warn('Setting USE_BUNDLES to False.')
            site.config['USE_BUNDLES'] = False
    super(BuildBundles, self).set_site(site)
def __init__(self):
    """Initialize the Jinja2 wrapper with the Jade extension and extra filters."""
    if pyjade is None:
        req_missing(['pyjade'], 'build this site (compile Jade)')
    env = jinja2.Environment(extensions=['pyjade.ext.jinja.PyJadeExtension'])
    env.filters['tojson'] = json.dumps
    env.filters['istuple'] = lambda x: isinstance(x, tuple)
    env.globals['enumerate'] = enumerate
    self.lookup = env
def get_json(self, url):
    """Return parsed JSON from *url*, fetching once and caching on the instance."""
    if requests is None:
        utils.req_missing(['requests'], 'install or list available plugins',
                          python=True, optional=False)
    if self.json is None:
        # First call: fetch and memoize.
        self.json = requests.get(url).json()
    return self.json
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
    """Compile Markdown into an HTML string plus its shortcode dependencies."""
    if Markdown is None:
        req_missing(['markdown'], 'build this site (compile Markdown)')
    if not is_two_file:
        # One-file post: strip the leading metadata block first.
        _, data = self.split_metadata(data)
    html = self.converter.convert(data)
    html, shortcode_deps = self.site.apply_shortcodes(
        html, filename=source_path, with_dependencies=True,
        extra_context={'post': post})
    return html, shortcode_deps
def _compile_string(self, nb_json):
    """Render a parsed notebook node to an HTML string."""
    if flag is None:
        req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
    config = Config(self.site.config['IPYNB_CONFIG'])
    config.update(get_default_jupyter_config())
    exporter = HTMLExporter(config=config)
    body, _resources = exporter.from_notebook_node(nb_json)
    return body
def create_post(self, path, **kw):
    """Create a new post as a Jupyter/IPython notebook at *path*.

    ``content`` may be raw .ipynb JSON (detected by a leading "{") or
    markdown text wrapped into a fresh notebook.  Remaining keyword
    arguments become post metadata (embedded when ``onefile`` is True).
    """
    if flag is None:
        req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    kernel = kw.pop('ipython_kernel', None)
    # is_page is not needed to create the file
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if content and content.startswith("{"):
        # Imported .ipynb file, guaranteed to start with "{" because it's JSON.
        nb = nbformat.reads(content, current_nbformat)
    else:
        # Fixed: content may be None (popped default) — the original
        # crashed on None.startswith().  Treat it as empty markdown.
        content = content or ''
        if ipy_modern:
            nb = nbformat.v4.new_notebook()
            nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
        else:
            nb = nbformat.new_notebook()
            nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
    if kernelspec is not None:
        if kernel is None:
            kernel = self.default_kernel
            self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
        # Build a name -> spec mapping of installed kernels (argv dropped:
        # it is machine-specific and must not be embedded in the post).
        IPYNB_KERNELS = {}
        ksm = kernelspec.KernelSpecManager()
        for k in ksm.find_kernel_specs():
            IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
            IPYNB_KERNELS[k]['name'] = k
            del IPYNB_KERNELS[k]['argv']
        if kernel not in IPYNB_KERNELS:
            # Fixed typo in the message: "mispelled".
            self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
            self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
            raise Exception('Unknown kernel "{0}"'.format(kernel))
        nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
    else:
        # Older IPython versions don't need kernelspecs.
        pass
    if onefile:
        nb["metadata"]["nikola"] = metadata
    with io.open(path, "w+", encoding="utf8") as fd:
        if ipy_modern:
            nbformat.write(nb, fd, 4)
        else:
            nbformat.write(nb, fd, 'ipynb')
def _execute(self, options=None, args=None):
    """Import a Tumblr blog into a Nikola site.

    Fixed: mutable default arguments (``options={}``, ``args=[]``) were
    shared across calls and mutated in place; missing early returns after
    req_missing caused AttributeError on the None modules later.
    """
    # Keep the signature backward-compatible while avoiding the
    # shared-mutable-default pitfall.
    if options is None:
        options = {}
    if args is None:
        args = []
    if not args:
        print(self.help())
        return
    options['site'] = args.pop(0)
    if args and ('output_folder' not in args or options['output_folder'] == 'new_site'):
        options['output_folder'] = args.pop(0)
    if args:
        LOGGER.warn(
            'You specified additional arguments ({0}). Please consider '
            'putting these arguments before the filename if you '
            'are running into problems.'.format(args))
    self.import_into_existing_site = False
    self.url_map = {}
    self.timezone = None
    self.tumblr_url = options['site']
    self.output_folder = options.get('output_folder', 'new_site')
    self.no_downloads = options.get('no_downloads', False)
    if pytumblr is None:
        req_missing(['pytumblr'], 'import a Tumblr site.')
        return  # constructing the client below would crash on None
    if requests is None:
        req_missing(['requests'], 'import a Tumblr site.')
        return
    # Get site data via Tumblr API
    # NOTE(review): hard-coded API credentials below; consider moving
    # them to configuration.
    self.client = pytumblr.TumblrRestClient(
        'iEAu2WLA7GjLSZ81Ie5ZJ0h8Jochj5TzFurxRP8a54vwBOVDcC',
        'D9UkKOO9zq9VmqfNKEBZG61bwv9TMZjA4P07BkB6Y35GCfUCdJ',
        'QEOkjGsWtT2kUPUpoh6tHFGjwoycHSd7Ypz6G8Pgz31NbHjFEy',
        'wan0Pd7VzESpdLDN0FYqReFOE7U1GG2X0GknOuKT3kpNUHwkBK')
    # Name of the site to import is the first part of the URL
    self.site_name = urlparse(self.tumblr_url).netloc.split('.')[0]
    self.site_info = self.client.blog_info(self.site_name)['blog']
    self.context = self.populate_context(self.site_info)
    self.context['SITE_URL'] = self.tumblr_url
    conf_template = self.generate_base_site()
    # Importing here because otherwise doit complains
    from nikola.plugins.compile.html import CompileHtml
    self.html_compiler = CompileHtml()
    self.import_posts()
    rendered_template = conf_template.render(**prepare_config(self.context))
    rendered_template = rendered_template.replace("# PRETTY_URLS = False", "PRETTY_URLS = True")
    self.write_configuration(self.get_configuration_output_path(), rendered_template)
def compile_sass(target_path, destination_path):
    """Compile the Sass file at target_path and write the CSS to destination_path."""
    # NOTE(review): `sass`, `utils` and `self` are taken from the enclosing
    # scope — this appears to be a nested helper; confirm in context.
    try:
        compiled = sass.compile(filename=target_path)
    except OSError:
        utils.req_missing([self.compiler_name], 'build Sass files (and use this theme)', False, False)
        # Fixed: without this return, `compiled` is unbound below and the
        # resulting NameError masked the helpful req_missing message.
        return
    with open(destination_path, "w+") as outfile:
        outfile.write(compiled)
def bpython(self, willful=True):
    """Run a bpython shell.

    If bpython is not importable and the caller explicitly asked for it
    (``willful``), report the missing requirement; the ImportError is
    re-raised either way so the caller can fall back to another console.
    """
    try:
        import bpython
    except ImportError:
        if willful:
            req_missing(['bpython'], 'use the bpython console')
        raise  # That's how _execute knows whether to try something else.
    else:
        # Import succeeded: start the interactive shell with the site context.
        bpython.embed(banner=self.header.format('bpython'), locals_=self.context)
def compile_html(self, source, dest, is_two_file=True):
    """Compile CreoleWiki source into HTML and write it to dest."""
    if creole is None:
        req_missing(['creole'], 'build this site (compile CreoleWiki)')
    makedirs(os.path.dirname(dest))
    with codecs.open(dest, "w+", "utf8") as out_file, \
            codecs.open(source, "r", "utf8") as in_file:
        document = Parser(in_file.read()).parse()
        out_file.write(HtmlEmitter(document).emit())
def compile_html(self, source, dest, is_two_file=True):
    """Compile source into dest with pandoc, honoring PANDOC_OPTIONS."""
    makedirs(os.path.dirname(dest))
    try:
        subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])
    except OSError as e:
        # Fixed typo: `e.strreror` raised AttributeError and hid the
        # "pandoc is missing" hint.
        if e.strerror == 'No such file or directory':
            req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False)
def compile_html(self, source, dest, is_two_file=True):
    """Compile reSt into HTML.

    Writes the rendered HTML to ``dest`` and the recorded docutils
    dependencies to ``dest + '.dep'`` (removing a stale depfile when
    there are none).  Returns True when rendering produced no serious
    errors (docutils error_level < 3), False otherwise.
    """
    if not has_docutils:
        req_missing(['docutils'], 'build this site (compile reStructuredText)')
    if not has_rst2html5:
        req_missing(
            ['rst2html5'], 'build this site (compile reStructuredText into HTML5)')
    makedirs(os.path.dirname(dest))
    error_level = 100
    with io.open(dest, "w+", encoding="utf8") as out_file:
        with io.open(source, "r", encoding="utf8") as in_file:
            data = in_file.read()
            add_ln = 0
            if not is_two_file:
                # One-file post: drop the metadata block before the first
                # blank line (handles both \n and \r\n endings).
                spl = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)
                data = spl[-1]
                if len(spl) != 1:
                    # If errors occur, this will be added to the line
                    # number reported by docutils so the line number
                    # matches the actual line number (off by 7 with default
                    # metadata, could be more or less depending on the post
                    # author).
                    add_ln = len(spl[0].splitlines()) + 1
            default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')
            output, error_level, deps = rst2html(
                data, settings_overrides={
                    'initial_header_level': 0,
                    'record_dependencies': True,
                    'stylesheet_path': None,
                    'link_stylesheet': True,
                    'syntax_highlight': 'short',
                    'math_output': 'mathjax',
                    'template': default_template_path,
                }, logger=self.logger, source_path=source, l_add_ln=add_ln)
            out_file.write(output)
    # Persist the dependency list next to the output; clean up stale files.
    deps_path = dest + '.dep'
    if deps.list:
        with io.open(deps_path, "w+", encoding="utf8") as deps_file:
            deps_file.write('\n'.join(deps.list))
    else:
        if os.path.isfile(deps_path):
            os.unlink(deps_path)
    # docutils severity >= 3 means an error was reported.
    if error_level < 3:
        return True
    else:
        return False
def compile_html(self, source, dest, is_two_file=True):
    """Compile source into HTML5 with the configured asciidoc binary."""
    makedirs(os.path.dirname(dest))
    binary = self.site.config.get('ASCIIDOC_BINARY', 'asciidoc')
    try:
        subprocess.check_call(
            (binary, '-b', 'html5', '-s', '-o', dest, source))
    except OSError as e:
        # Fixed typo: `e.strreror` raised AttributeError, hiding the
        # "asciidoc is missing" hint.
        if e.strerror == 'No such file or directory':
            req_missing(['asciidoc'], 'build this site (compile with asciidoc)', python=False)
def compile_html(self, source, dest, is_two_file=True):
    """Compile BBCode source into HTML at dest."""
    if bbcode is None:
        req_missing(['bbcode'], 'build this site (compile BBCode)')
    makedirs(os.path.dirname(dest))
    with codecs.open(dest, "w+", "utf8") as out_file, \
            codecs.open(source, "r", "utf8") as in_file:
        data = in_file.read()
        if not is_two_file:
            # Drop the metadata block preceding the first blank line.
            data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
        out_file.write(self.parser.format(data))
def compile_html(self, source, dest, is_two_file=True):
    """Compile Markdown source into HTML at dest using misaka."""
    if misaka is None:
        req_missing(['misaka'], 'build this site (compile with misaka)')
    makedirs(os.path.dirname(dest))
    with codecs.open(dest, "w+", "utf8") as out_file, \
            codecs.open(source, "r", "utf8") as in_file:
        data = in_file.read()
        if not is_two_file:
            # Drop the metadata block preceding the first blank line.
            data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
        out_file.write(misaka.html(data, extensions=self.ext))
def compile_html(self, source, dest, is_two_file=True):
    """Compile Textile source into HTML at dest."""
    if textile is None:
        req_missing(['textile'], 'build this site (compile Textile)')
    makedirs(os.path.dirname(dest))
    with codecs.open(dest, "w+", "utf8") as out_file, \
            codecs.open(source, "r", "utf8") as in_file:
        data = in_file.read()
        if not is_two_file:
            # Drop the metadata block preceding the first blank line.
            data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
        out_file.write(textile(data, head_offset=1))
def render_template(self, template_name, output_name, context):
    """Render the template into output_name using context.

    Returns the rendered string; when ``output_name`` is given, also
    writes the UTF-8-encoded result to that path.
    """
    if jinja2 is None:
        req_missing(['jinja2'], 'use this theme')
    template = self.lookup.get_template(template_name)
    output = template.render(**context)
    if output_name is not None:
        makedirs(os.path.dirname(output_name))
        # Fixed: the file handle previously shadowed `output`, so
        # `output.write(output.encode('utf8'))` could never work.
        with open(output_name, 'wb+') as out_file:
            out_file.write(output.encode('utf8'))
    return output
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
    """Compile the CreoleWiki source file into HTML and save as dest."""
    if creole is None:
        req_missing(['creole'], 'build this site (compile CreoleWiki)')
    makedirs(os.path.dirname(dest))
    with codecs.open(dest, "w+", "utf8") as out_file, \
            codecs.open(source, "r", "utf8") as in_file:
        document = Parser(in_file.read()).parse()
        out_file.write(HtmlEmitter(document).emit())
def compile_target(target, dst):
    """Compile one Sass source (relative name `target`) into `dst`."""
    # NOTE(review): `self`, `kw` and `dst_dir` come from the enclosing
    # scope — this appears to be a nested helper; confirm in context.
    utils.makedirs(dst_dir)
    src = os.path.join(kw['cache_folder'], self.sources_folder, target)
    try:
        compiled = subprocess.check_output([self.compiler_name, src])
    except OSError:
        utils.req_missing([self.compiler_name], 'build Sass files (and use this theme)', False, False)
        # Fixed: without this return, `compiled` is unbound below and the
        # UnboundLocalError masked the helpful req_missing message.
        return
    with open(dst, "wb+") as outf:
        outf.write(compiled)
def compile_string(self, source, is_two_file=True):
    """Read the .ipynb file at `source` and export its HTML body."""
    if flag is None:
        req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
    config = Config(self.site.config['IPYNB_CONFIG'])
    exporter = HTMLExporter(config=config)
    with io.open(source, "r", encoding="utf8") as in_file:
        notebook = nbformat.read(in_file, current_nbformat)
    body, _resources = exporter.from_notebook_node(notebook)
    return body