def _execute(self, options, args):
    """Apply mincss the generated site.

    Walks OUTPUT_FOLDER collecting every .css file (indexed by basename)
    and every .html page, runs mincss over all pages, then rewrites each
    CSS file with only the rules mincss found to be in use.
    """
    output_folder = self.site.config["OUTPUT_FOLDER"]
    if Processor is None:
        # mincss is an optional dependency.
        LOGGER.warn("To use the mincss command," ' you have to install the "mincss" package.')
        return
    p = Processor(preserve_remote_urls=False)
    urls = []
    css_files = {}
    for root, dirs, files in os.walk(output_folder):
        for f in files:
            url = os.path.join(root, f)
            if url.endswith(".css"):
                fname = os.path.basename(url)
                # CSS files are matched back by basename below, so two
                # files with the same basename would be ambiguous.
                if fname in css_files:
                    LOGGER.error("You have two CSS files with the same name and that confuses me.")
                    sys.exit(1)
                css_files[fname] = url
            if not f.endswith(".html"):
                continue
            urls.append(url)
    p.process(*urls)
    for inline in p.links:
        # inline.href is the stylesheet URL referenced by the pages;
        # map it back to the on-disk CSS file via its basename.
        fname = os.path.basename(inline.href)
        # NOTE(review): 'wb+' (binary) with inline.after would fail on
        # Python 3 if mincss returns str -- confirm the payload type.
        with open(css_files[fname], "wb+") as outf:
            outf.write(inline.after)
def _execute(self, options, args):
    """Start the watcher."""
    try:
        from livereload.server import start
    except ImportError:
        LOGGER.error('To use the auto command, you need to install the '
                     '"livereload" package.')
        return
    # Run an initial build so we are uptodate
    subprocess.call(("nikola", "build"))
    port = options and options.get('port')
    # Paths that the Guardfile should watch for changes.
    watched = ["conf.py", "themes", "templates", self.site.config['GALLERY_PATH']]
    watched.extend(os.path.dirname(entry[0]) for entry in self.site.config['post_pages'])
    watched.extend(os.path.dirname(entry) for entry in self.site.config['FILES_FOLDERS'])
    # Create a Guardfile
    with codecs.open("Guardfile", "wb+", "utf8") as guardfile:
        guardfile.write(GUARDFILE.format(json.dumps(watched)))
    os.chmod("Guardfile", 0o755)
    out_folder = self.site.config['OUTPUT_FOLDER']
    start(port, out_folder, options and options.get('browser'))
def _replace_tags_line(post, tags):
    """Rewrite the '.. tags:' line of a post's source with the given tags."""
    source_path = post.source_path
    if post.is_two_file:
        # fixme: currently doesn't handle two post files.
        LOGGER.error(
            "Two file posts are not supported, currently."
            "Skipping %s" % source_path
        )
        return
    with codecs.open(source_path, 'r', 'utf-8') as handle:
        lines = handle.readlines()
    marker = u'.. tags:'
    replacement = u'.. tags: %s\n' % ', '.join(tags)
    # Replace only the first tag-list line, then stop scanning.
    for position, current in enumerate(lines):
        if current.startswith(marker):
            lines[position] = replacement
            break
    with codecs.open(source_path, 'w+', 'utf-8') as handle:
        handle.writelines(lines)
def analyze(self, task, find_sources=False):
    """Check one rendered file for broken internal links.

    task: a doit task name whose last ':'-separated component is the
    output filename. Returns True when at least one broken link exists.
    """
    rv = False
    self.whitelist = [re.compile(x) for x in self.site.config['LINK_CHECK_WHITELIST']]
    filename = task.split(":")[-1]
    try:
        d = lxml.html.fromstring(open(filename).read())
        for l in d.iterlinks():
            target = l[0].attrib[l[1]]
            if target == "#":
                continue
            parsed = urlparse(target)
            # External links (scheme or protocol-relative) are not checked.
            if parsed.scheme or target.startswith('//'):
                continue
            if parsed.fragment:
                target = target.split('#')[0]
            target_filename = os.path.abspath(
                os.path.join(os.path.dirname(filename), unquote(target)))
            if any(re.match(x, target_filename) for x in self.whitelist):
                continue
            elif target_filename not in self.existing_targets:
                if os.path.exists(target_filename):
                    self.existing_targets.add(target_filename)
                else:
                    rv = True
                    # Bug fix: 'target' was previously passed as a stray
                    # positional argument and never shown in the message.
                    LOGGER.warn("Broken link in {0}: {1}".format(filename, target))
                    if find_sources:
                        LOGGER.warn("Possible sources:")
                        LOGGER.warn(os.popen('nikola list --deps ' + task, 'r').read())
                        LOGGER.warn("===============================\n")
    except Exception as exc:
        # Bug fix: extra positional args were silently dropped by the
        # logger; interpolate them into the message instead.
        LOGGER.error("Error with {0}: {1}".format(filename, exc))
    return rv
def site_context(self, site, client_templates):
    """Build a JSON-serializable snapshot of the site's GLOBAL_CONTEXT.

    TranslatableSettings are expanded per language into
    result['translated_settings']; other callables cannot be serialized
    for the client and are skipped (with a warning, unless whitelisted).
    """
    from nikola.utils import TranslatableSetting, LOGGER, Functionary
    result = {}
    translated_settings = {}
    for l in site.config['TRANSLATIONS']:
        translated_settings[l] = {}
    for k, v in site.GLOBAL_CONTEXT.items():
        # These entries are unserializable or meaningless client-side.
        if k in ['template_hooks', 'get_post_data', 'timezone']:
            continue
        if callable(v):
            if isinstance(v, TranslatableSetting):
                # Expand the per-language values instead of the callable.
                for l in site.config['TRANSLATIONS']:
                    translated_settings[l][k] = v.values[l]
                continue
            elif isinstance(v, Functionary):  # just a callable dict
                pass
            else:
                LOGGER.warn('Found unserializable callable in GLOBAL_CONTEXT: %r, %s' % (k, type(v)))
                continue
        result[k] = v
    result['translated_settings'] = translated_settings
    # TODO: LEGAL_VALUES isn't exported by nikola.py!
    # result['lang'] in LEGAL_VALUES['RTL_LANGUAGES']
    result['is_rtl'] = False
    result['default_lang'] = site.default_lang
    result['BASE_URL'] = site.config['BASE_URL']
    result['client_templates'] = client_templates
    return result
def handleMatch(self, m):
    """Build a <div class="gist"> element for a matched gist reference.

    Emits a <script> tag pointing at the gist embed JS and, when the
    "requests" package is available, a <noscript> fallback holding the
    raw gist source in a <pre> block.
    """
    gist_id = m.group("gist_id")
    gist_file = m.group("filename")  # may be None (whole-gist embed)
    gist_elem = etree.Element("div")
    gist_elem.set("class", "gist")
    script_elem = etree.SubElement(gist_elem, "script")
    # NOTE(review): when "requests" is unavailable the <script> src is
    # never set, so the embed is empty -- confirm that is intended.
    if requests:
        noscript_elem = etree.SubElement(gist_elem, "noscript")
        try:
            if gist_file:
                script_elem.set("src", GIST_FILE_JS_URL.format(gist_id, gist_file))
                raw_gist = self.get_raw_gist_with_filename(gist_id, gist_file)
            else:
                script_elem.set("src", GIST_JS_URL.format(gist_id))
                raw_gist = self.get_raw_gist(gist_id)
            # Insert source as <pre/> within <noscript>
            pre_elem = etree.SubElement(noscript_elem, "pre")
            pre_elem.text = AtomicString(raw_gist)
        except GistFetchException as e:
            # Fetch failed: keep the <script> embed, leave an HTML comment
            # in the fallback explaining why there is no inline source.
            LOGGER.warn(e.message)
            warning_comment = etree.Comment(" WARNING: {0} ".format(e.message))
            noscript_elem.append(warning_comment)
    else:
        LOGGER.warn('"requests" package not installed. ' "Please install to add inline gist source.")
    return gist_elem
def _replace_tags_line(post, tags):
    """Overwrite the tag-list line in a post source file with *tags*."""
    path = post.source_path
    if post.is_two_file:
        # fixme: currently doesn't handle two post files.
        LOGGER.error("Two file posts are not supported, currently."
                     "Skipping %s" % path)
        return
    with codecs.open(path, 'r', 'utf-8') as infile:
        content = infile.readlines()
    prefix = u'.. tags:'
    updated = u'.. tags: %s\n' % ', '.join(tags)
    # Locate the first tag line (if any) and swap it in place.
    idx = next((i for i, ln in enumerate(content) if ln.startswith(prefix)), None)
    if idx is not None:
        content[idx] = updated
    with codecs.open(path, 'w+', 'utf-8') as outfile:
        outfile.writelines(content)
def sort_tags(site, filepaths, dry_run=False):
    """ Sorts all the tags in the given list of posts.

    $ nikola tags --sort posts/*.rst

    The above command will sort all tags alphabetically, in all rst
    posts.  This command can be run on all posts, to clean up things.
    """
    posts = [p for p in site.timeline if p.source_path in filepaths]
    if not posts:
        LOGGER.error("Need at least one post.")
        return
    FMT = 'Tags for {0}:\n{1:>6} - {2}\n{3:>6} - {4}\n'
    OLD, NEW = 'old', 'new'
    for post in posts:
        new_tags = sorted(post.tags)
        if dry_run:
            # Only report what would change.
            print(FMT.format(post.source_path, OLD, post.tags, NEW, new_tags))
        elif new_tags != post.tags:
            _replace_tags_line(post, new_tags)
    return new_tags
def _replace_tags_line(post, tags):
    """Replace the '.. tags:' line in the post (or its meta file) with *tags*."""
    if post.is_two_file:
        path = post.metadata_path
        try:
            if not post.newstylemeta:
                LOGGER.error("{0} uses old-style metadata which is not supported by this plugin, skipping.".format(path))
                return
        except AttributeError:
            # post.newstylemeta is not present in older versions. If the user
            # has old-style meta files, it will crash or not do the job.
            pass
    else:
        path = post.source_path
    with codecs.open(path, 'r', 'utf-8') as fh:
        lines = fh.readlines()
    identifier = u'.. tags:'
    replacement = u'.. tags: %s\n' % ', '.join(tags)
    for pos, ln in enumerate(lines):
        if ln.startswith(identifier):
            lines[pos] = replacement
            break
    with codecs.open(path, 'w+', 'utf-8') as fh:
        fh.writelines(lines)
def plain(self):
    """Plain Python shell.

    Loads conf.py, builds a Nikola site from it, and drops into a
    code.interact() REPL with conf/SITE/Nikola pre-bound.
    """
    from nikola import Nikola
    try:
        import conf
        SITE = Nikola(**conf.__dict__)
        SITE.scan_posts()
        gl = {'conf': conf, 'SITE': SITE, 'Nikola': Nikola}
    except ImportError:
        LOGGER.error("No configuration found, cannot run the console.")
    else:
        import code
        try:
            import readline
        except ImportError:
            pass
        else:
            import rlcompleter
            # Tab-complete against the console's globals.
            readline.set_completer(rlcompleter.Completer(gl).complete)
            readline.parse_and_bind("tab:complete")
        # Honor the user's PYTHONSTARTUP file, like the real REPL does.
        pythonrc = os.environ.get("PYTHONSTARTUP")
        if pythonrc and os.path.isfile(pythonrc):
            try:
                execfile(pythonrc)  # NOQA
            except NameError:
                # execfile() does not exist on Python 3.
                pass
        code.interact(local=gl, banner=self.header.format('Python'))
def emoji_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Render the deprecated :emoji:`name` reST role as an <img> node."""
    # Bug fix: avoid mutable default arguments ({} / []) which are shared
    # across calls; normalize to fresh objects instead.
    options = options or {}
    content = content or []
    text = text.lower()
    LOGGER.warn('The role :emoji:`{0}` is deprecated. Use |{0}| instead'.format(text))
    node = nodes.image(
        uri='http://www.tortue.me/emoji/{0}.png'.format(text),
        alt=text,
        classes=['emoji'],
    )
    return [node], []
def emoji_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Render the deprecated :emoji:`name` reST role as an <img> node."""
    # Bug fix: avoid mutable default arguments ({} / []) which are shared
    # across calls; normalize to fresh objects instead.
    options = options or {}
    content = content or []
    text = text.lower()
    LOGGER.warn('The role :emoji:`{0}` is deprecated. Use |{0}| instead'.format(text))
    node = nodes.image(
        uri='https://cdnjs.cloudflare.com/ajax/libs/emojify.js/1.1.0/images/basic/{0}.png'.format(text),
        alt=text,
        classes=['emoji'],
    )
    return [node], []
def compile_html(self, source, dest, is_two_file=True):
    """Compile *source* into HTML at *dest* by shelling out to pandoc.

    Raises Exception when the pandoc binary is missing; re-raises any
    other OSError.
    """
    makedirs(os.path.dirname(dest))
    try:
        subprocess.check_call(('pandoc', '-o', dest, source))
    except OSError as e:
        # Bug fix: the attribute was misspelled "strreror", which raised
        # AttributeError here instead of detecting a missing binary.
        if e.strerror == 'No such file or directory':
            LOGGER.error('To use the pandoc compiler,'
                         ' you have to install the "pandoc" Haskell package.')
            raise Exception('Cannot compile {0} -- pandoc '
                            'missing'.format(source))
        # Bug fix: any other OSError was silently swallowed before.
        raise
def emoji_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Render the deprecated :emoji:`name` reST role as an <img> node."""
    # Bug fix: mutable default arguments ({} / []) are shared across
    # calls; use None sentinels and create fresh objects per call.
    options = options or {}
    content = content or []
    text = text.lower()
    LOGGER.warn(
        'The role :emoji:`{0}` is deprecated. Use |{0}| instead'.format(text))
    node = nodes.image(
        uri='http://www.tortue.me/emoji/{0}.png'.format(text),
        alt=text,
        classes=['emoji'],
    )
    return [node], []
def test_gen_tasks(self):
    """Every task yielded by HelloWorld.gen_tasks has the expected shape."""
    plugin = HelloWorld()
    plugin.site = MockObject()
    plugin.site.config = {}
    for task in plugin.gen_tasks():
        self.assertEqual(task['basename'], 'hello_world')
        self.assertEqual(task['uptodate'], [False])
        try:
            self.assertIsInstance(task['actions'][0][1][0], bool)
        except AttributeError:
            # assertIsInstance() is unavailable on Python 2.6.
            LOGGER.warning('Python 2.6 is missing assertIsInstance()')
def _execute(self, options, args):
    """Start test server."""
    out_dir = self.site.config['OUTPUT_FOLDER']
    if not os.path.isdir(out_dir):
        LOGGER.error("Missing '{0}' folder?".format(out_dir))
        return
    os.chdir(out_dir)
    server = HTTPServer((options['address'], options['port']),
                        OurHTTPRequestHandler)
    sockname = server.socket.getsockname()
    LOGGER.notice("Serving HTTP on {0} port {1} ...".format(*sockname))
    server.serve_forever()
def ipython(self):
    """IPython shell."""
    from nikola import Nikola
    try:
        import conf
    except ImportError:
        LOGGER.error("No configuration found, cannot run the console.")
        return
    import IPython
    site = Nikola(**conf.__dict__)
    site.scan_posts()
    IPython.embed(header=self.header.format('IPython'))
def doc_shortcode(*args, **kwargs):
    """Implement the doc shortcode."""
    text = kwargs['data']
    success, twin_slugs, title, permalink, slug = _doc_link(text, text, LOGGER)
    if not success:
        LOGGER.error('"{0}" slug doesn\'t exist.'.format(slug))
        return '<span class="error text-error" style="color: red;">Invalid link: {0}</span>'.format(text)
    if twin_slugs:
        LOGGER.warning('More than one post with the same slug. Using "{0}" for doc shortcode'.format(permalink))
    return '<a href="{0}">{1}</a>'.format(permalink, title)
def doc_shortcode(*args, **kwargs):
    """Implement the doc shortcode.

    Resolves kwargs['data'] as a post slug; returns an anchor tag on
    success, or a red error span when the slug does not exist.
    """
    text = kwargs['data']
    success, twin_slugs, title, permalink, slug = _doc_link(text, text, LOGGER)
    if success:
        if twin_slugs:
            # Consistency fix: use warning() -- warn() is the deprecated
            # alias, and the sibling implementation already uses warning().
            LOGGER.warning(
                'More than one post with the same slug. Using "{0}" for doc shortcode'.format(permalink))
        return '<a href="{0}">{1}</a>'.format(permalink, title)
    else:
        LOGGER.error(
            '"{0}" slug doesn\'t exist.'.format(slug))
        return '<span class="error text-error" style="color: red;">Invalid link: {0}</span>'.format(text)
def bpython(self):
    """bpython shell."""
    from nikola import Nikola
    try:
        import conf
    except ImportError:
        LOGGER.error("No configuration found, cannot run the console.")
        return
    import bpython
    site = Nikola(**conf.__dict__)
    site.scan_posts()
    namespace = {'conf': conf, 'SITE': site, 'Nikola': Nikola}
    bpython.embed(banner=self.header.format('bpython'), locals_=namespace)
def spell_check(self, post, lang):
    """ Check spellings for the given post and given language. """
    if not enchant.dict_exists(lang):
        LOGGER.notice('No dictionary found for %s' % lang)
        return
    checker = SpellChecker(lang, filters=[EmailFilter, URLFilter])
    checker.set_text(post.text(lang=lang, strip_html=True))
    # Keep only words not found in any of the other dictionaries.
    misspelt = [
        error.word for error in checker
        if self._not_in_other_dictionaries(error.word, lang)
    ]
    LOGGER.notice('Mis-spelt words in %s: %s' % (post.fragment_deps(lang), ', '.join(misspelt)))
def tag(self, post, count=5):
    """ Return a list of top tags, given a post.

    post: can either be a post object or the source path
    count: the number of tags to return
    """
    if isinstance(post, (bytes_str, unicode_str)):
        path = post
        post = self._get_post_from_source_path(path)
        if post is None:
            LOGGER.error('No post found for path: %s' % path)
            return None
    return self._find_top_scoring_tags(post, count)
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
    """Compile docstrings into HTML strings, with shortcode support.

    Runs pdoc over the module names found in *data*, reads the generated
    HTML page back from a temporary directory, then re-applies any
    shortcodes extracted from the input.
    """
    if not is_two_file:
        _, data = self.split_metadata(data, None, lang)
    new_data, shortcodes = sc.extract_shortcodes(data)
    # The way pdoc generates output is a bit inflexible
    # NOTE(review): "tempaltes" looks like a typo for "templates" -- but
    # it may match the actual on-disk directory name; confirm before fixing.
    path_templates = os.path.join(self.plugin_path, "tempaltes")
    LOGGER.info(f"set path tempaltes to {path_templates}")
    with tempfile.TemporaryDirectory() as tmpdir:
        # pdoc writes its HTML to <tmpdir>/<module>/<page>.html
        subprocess.check_call(['pdoc', '--html', '--html-no-source',
                               '--html-dir', tmpdir,
                               "--template-dir", path_templates] + shlex.split(new_data.strip()))
        fname = os.listdir(tmpdir)[0]
        tmd_subdir = os.path.join(tmpdir, fname)
        fname = os.listdir(tmd_subdir)[0]
        LOGGER.info(f"tmpdir = {tmd_subdir}, fname = {fname}")
        with open(os.path.join(tmd_subdir, fname), 'r', encoding='utf8') as inf:
            output = inf.read()
    return self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
def doc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Handle the doc role."""
    success, twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)
    if not success:
        msg = inliner.reporter.error(
            '"{0}" slug doesn\'t exist.'.format(slug), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    if twin_slugs:
        # Report the ambiguity both to docutils and to the site log.
        inliner.reporter.warning(
            'More than one post with the same slug. Using "{0}"'.format(permalink))
        LOGGER.warning(
            'More than one post with the same slug. Using "{0}" for doc role'.format(permalink))
    node = make_link_node(rawtext, title, permalink, options)
    return [node], []
def gen_tasks(self):
    """Build final pages from metadata and HTML fragments."""
    kw = {
        "post_pages": self.site.config["post_pages"],
        "translations": self.site.config["TRANSLATIONS"],
        "filters": self.site.config["FILTERS"],
        "show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
        "demote_headers": self.site.config['DEMOTE_HEADERS'],
    }
    self.site.scan_posts()
    yield self.group_task()
    # Pre-compute the blog index output path per language so each page
    # task can be checked for a collision with it (Issue 3022).
    index_paths = {}
    for lang in kw["translations"]:
        index_paths[lang] = False
        if not self.site.config["DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED"]:
            index_paths[lang] = os.path.normpath(
                os.path.join(self.site.config['OUTPUT_FOLDER'],
                             self.site.path('index', '', lang=lang)))
    for lang in kw["translations"]:
        for post in self.site.timeline:
            if not kw["show_untranslated_posts"] and not post.is_translation_available(lang):
                continue
            if post.is_post:
                context = {'pagekind': ['post_page']}
            else:
                context = {'pagekind': ['story_page', 'page_page']}
            for task in self.site.generic_page_renderer(lang, post, kw["filters"], context):
                if task['name'] == index_paths[lang]:
                    # Issue 3022
                    LOGGER.error(
                        "Post {0!r}: output path ({1}) conflicts with the blog index ({2}). "
                        "Please change INDEX_PATH or disable index generation."
                        .format(post.source_path, task['name'], index_paths[lang]))
                task['uptodate'] = task['uptodate'] + [
                    config_changed(kw, 'nikola.plugins.task.pages')
                ]
                task['basename'] = self.name
                task['task_dep'] = ['render_posts']
                yield task
def doc_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Handle the doc role.

    Resolves *text* as a post slug; returns a reference node, or a
    problematic node plus a system message when the slug does not exist.
    """
    # Bug fix: mutable default arguments ({} / []) are shared between
    # calls; normalize from None sentinels instead.
    options = options or {}
    content = content or []
    success, twin_slugs, title, permalink, slug = _doc_link(rawtext, text, options, content)
    if success:
        if twin_slugs:
            inliner.reporter.warning(
                'More than one post with the same slug. Using "{0}"'.format(permalink))
            # Consistency fix: warning() instead of the deprecated warn().
            LOGGER.warning(
                'More than one post with the same slug. Using "{0}" for doc role'.format(permalink))
        node = make_link_node(rawtext, title, permalink, options)
        return [node], []
    else:
        msg = inliner.reporter.error(
            '"{0}" slug doesn\'t exist.'.format(slug), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
def after_scan(self, site):
    """Group posts by their 'series' meta field and publish the result
    in the global context under 'series'."""
    LOGGER.debug('in Series.after_scan')
    series_posts = defaultdict(list)
    # Bug fix: defaultdict calls its factory with NO arguments, so the
    # original ``defaultdict(lambda x: x, ...)`` raised TypeError for any
    # series lacking a configured description.  The evident intent of
    # ``lambda x: x`` was "default to the tag itself"; use .get() for that.
    descriptions = site.config['SERIES_DESCRIPTIONS']
    for post in sorted(site.posts, key=lambda p: p.date):
        series_tag = post.meta[site.default_lang]['series']
        if series_tag:
            series_posts[series_tag].append(post)
    series_data = {
        tag: SeriesDescription(description=descriptions.get(tag, tag), posts=posts)
        for tag, posts in series_posts.items()
    }
    LOGGER.debug(pformat(series_data))
    site._GLOBAL_CONTEXT['series'] = series_data
def gen_tasks(self):
    """Yield one doit copy task per example image into output/gallery/."""
    for in_name in get_entries(os.path.join(base_path, "../examples")):
        name_image = os.path.basename(in_name)
        out_name = "output/gallery/" + name_image
        # Bug fix: log message was garbled ("emiting copyi task").
        LOGGER.info(f"emitting copy task {in_name} to {out_name}")
        yield utils.apply_filters(
            {
                'basename': self.name,
                'name': out_name,
                'file_dep': [in_name],
                'targets': [out_name],
                'actions': [(utils.copy_file, [in_name, out_name])],
                'clean': True,
            }, {})
def spell_check(self, post, lang):
    """ Check spellings for the given post and given language. """
    try:
        dictionary = enchant.request_dict(lang)
        checker = SpellChecker(lang, filters=[EmailFilter, URLFilter])
        checker.set_text(post.text(lang=lang, strip_html=True))
        # Words flagged by the checker that the dictionary also rejects.
        unknown = [err.word for err in checker if not dictionary.check(err.word)]
        LOGGER.notice('Mis-spelt words in %s: %s' % (post.fragment_deps(lang), ', '.join(unknown)))
    except enchant.DictNotFoundError:
        LOGGER.notice('No dictionary found for %s' % lang)
def spell_check(self, post, lang):
    """ Check spellings for the given post and given language. """
    if enchant.dict_exists(lang):
        checker = SpellChecker(lang, filters=[EmailFilter, URLFilter])
        checker.set_text(post.text(lang=lang, strip_html=True))
        flagged = []
        for error in checker:
            # Skip words accepted by any of the other dictionaries.
            if self._not_in_other_dictionaries(error.word, lang):
                flagged.append(error.word)
        LOGGER.notice(
            'Mis-spelt words in %s: %s' % (
                post.fragment_deps(lang), ', '.join(flagged)
            )
        )
    else:
        LOGGER.notice('No dictionary found for %s' % lang)
def read_files(self):
    """Collect gallery metadata rows (path, image, name, url, id) for
    every example file found under ../examples."""
    self.rows = []
    counter = 0
    for fname in get_entries(os.path.join(base_path, "../examples")):
        LOGGER.info("processing " + fname)
        try:
            data = {}
            data["path"] = fname
            # NOTE(review): assumes a 3-character extension (e.g. '.py');
            # confirm against get_entries().
            data["path_image"] = fname[:-3] + ".png"
            data["name"] = os.path.basename(os.path.dirname(fname))
            data["url"] = ("https://github.com/undertherain/pycontextfree/blob/master/examples/"
                           + data["name"] + "/" + os.path.basename(fname))
            data["id"] = counter
            name_image = os.path.basename(fname)[:-3] + ".png"
            data["image"] = "/gallery/" + name_image
            counter += 1
            self.rows.append(data)
        except Exception as e:
            # Bug fix: the original concatenated filename and exception
            # text with no separator, producing an unreadable message.
            LOGGER.warning("error processing {0}: {1}".format(fname, e))
def _execute(self, options, args):
    """Manage the tags on the site.

    Dispatches to exactly one sub-action (add / list / merge / remove /
    search / auto-tag / sort) based on which option was supplied, and
    prints help when none matched.
    """
    try:
        import conf
    except ImportError:
        LOGGER.error("No configuration found, cannot run the console.")
    else:
        # Re-read conf.py in case it changed since this process started.
        _reload(conf)
        nikola = Nikola(**conf.__dict__)
        nikola.scan_posts()
        if len(options['add']) > 0 and len(args) > 0:
            add_tags(nikola, options['add'], args, options['dry-run'])
        elif options['list']:
            list_tags(nikola, options['list_sorting'])
        elif options['merge'].count(',') > 0 and len(args) > 0:
            # merge needs at least "source,destination" in one argument.
            merge_tags(nikola, options['merge'], args, options['dry-run'])
        elif len(options['remove']) > 0 and len(args) > 0:
            remove_tags(nikola, options['remove'], args, options['dry-run'])
        elif len(options['search']) > 0:
            search_tags(nikola, options['search'])
        elif options['tag'] and len(args) > 0:
            # Auto-tag each given post and immediately apply the result.
            tagger = _AutoTag(nikola)
            for post in args:
                tags = ','.join(tagger.tag(post))
                add_tags(nikola, tags, [post], options['dry-run'])
        elif options['sort'] and len(args) > 0:
            sort_tags(nikola, args, options['dry-run'])
        else:
            print(self.help())
def handler(self, title=None, site=None, data=None, lang=None):
    """Create an inter-site link

    Args:
        title: optional argument to specify a different title from the post

    Returns:
        output HTML to replace the shortcode
    """
    success, twin_slugs, title, permalink, slug = lancelot_link(site, data, title)
    if not success:
        LOGGER.error(f'"{slug}" slug doesn\'t exist.')
        output = ('<span class="error text-error" style="color: red;">'
                  f'Invalid link: {data}</span>')
        return output, []
    if twin_slugs:
        LOGGER.warning('More than one post with the same slug. '
                       f'Using "{permalink}" for lancelot shortcode')
    return f'<a href="{permalink}">{title}</a>', []
def compile_html(self, source, dest, is_two_file=True):
    """Compile reSt into HTML.

    Writes the rendered HTML to *dest* and a sidecar <dest>.dep file
    listing docutils dependencies (removed when there are none).
    Returns True unless docutils reported an error (level >= 3).
    """
    if not has_docutils:
        raise Exception('To build this site, you need to install the '
                        '"docutils" package.')
    makedirs(os.path.dirname(dest))
    error_level = 100  # sentinel worse than any real docutils level
    with codecs.open(dest, "w+", "utf8") as out_file:
        with codecs.open(source, "r", "utf8") as in_file:
            data = in_file.read()
            if not is_two_file:
                # One-file posts carry metadata before the first blank
                # line; strip it before compiling.
                data = data.split('\n\n', 1)[-1]
            output, error_level, deps = rst2html(
                data, settings_overrides={
                    'initial_header_level': 2,
                    'record_dependencies': True,
                    'stylesheet_path': None,
                    'link_stylesheet': True,
                    'syntax_highlight': 'short',
                    'math_output': 'mathjax',
                })
            out_file.write(output)
        deps_path = dest + '.dep'
        if deps.list:
            with codecs.open(deps_path, "wb+", "utf8") as deps_file:
                deps_file.write('\n'.join(deps.list))
        else:
            # No dependencies: remove a stale .dep file if present.
            if os.path.isfile(deps_path):
                os.unlink(deps_path)
    # docutils levels: 2 = warning, 3+ = error.
    if error_level == 2:
        LOGGER.warning('Docutils reports warnings on {0}'.format(source))
    if error_level < 3:
        return True
    else:
        return False
def run(self):
    """Render the graph with the 'dot' binary and return raw HTML nodes.

    The resulting SVG is either embedded inline in the page or written
    to a content-addressed external file, depending on self.embed_graph.
    """
    if 'alt' in self.options and self.ignore_alt:
        LOGGER.warning("Graphviz: the :alt: option is ignored, it's better to set the title of your graph.")
    if self.arguments:
        if self.content:
            LOGGER.warning("Graphviz: this directive can't have both content and a filename argument. Ignoring content.")
        f_name = self.arguments[0]
        # TODO: be smart about where exactly that file is located
        with open(f_name, 'rb') as inf:
            data = inf.read().decode('utf-8')
    else:
        data = '\n'.join(self.content)
    node_list = []
    try:
        p = Popen([self.dot_path, '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        svg_data, errors = p.communicate(input=data.encode('utf8'))
        code = p.wait()
        if code:
            # Some error: report it through docutils instead of raising.
            document = self.state.document
            return [document.reporter.error(
                'Error processing graph: {0}'.format(errors), line=self.lineno)]
        if self.embed_graph:
            # SVG embedded in the HTML
            if 'inline' in self.options:
                svg_data = '<span class="graphviz">{0}</span>'.format(svg_data.decode('utf8'))
            else:
                svg_data = '<p class="graphviz">{0}</p>'.format(svg_data.decode('utf8'))
        else:
            # External SVG file
            # TODO: there is no reason why this branch needs to be a raw
            # directive. It could generate regular docutils nodes and
            # be useful for any writer.
            makedirs(self.output_folder)
            # Content-addressed filename: identical graphs share a file.
            f_name = hashlib.md5(svg_data).hexdigest() + '.svg'
            img_path = self.graph_path + f_name
            f_path = os.path.join(self.output_folder, f_name)
            alt = self.options.get('alt', '')
            with open(f_path, 'wb+') as outf:
                outf.write(svg_data)
            self.state.document.settings.record_dependencies.add(f_path)
            if 'inline' in self.options:
                svg_data = '<span class="graphviz"><img src="{0}" alt="{1}"></span>'.format(img_path, alt)
            else:
                svg_data = '<p class="graphviz"><img src="{0}" alt="{1}"></p>'.format(img_path, alt)
        node_list.append(nodes.raw('', svg_data, format='html'))
        if 'caption' in self.options and 'inline' not in self.options:
            node_list.append(
                nodes.raw('', '<p class="caption">{0}</p>'.format(self.options['caption']), format='html'))
        return node_list
    except OSError:
        LOGGER.error("Can't execute 'dot'")
        raise
def run(self):
    """Render the graph with the 'dot' binary and return raw HTML nodes.

    The resulting SVG is either embedded inline in the page or written
    to a content-addressed external file, depending on self.embed_graph.
    """
    if 'alt' in self.options and self.ignore_alt:
        LOGGER.warning("Graphviz: the :alt: option is ignored, it's better to set the title of your graph.")
    if self.arguments:
        if self.content:
            LOGGER.warning("Graphviz: this directive can't have both content and a filename argument. Ignoring content.")
        f_name = self.arguments[0]
        # TODO: be smart about where exactly that file is located
        with open(f_name, 'rb') as inf:
            data = inf.read().decode('utf-8')
    else:
        data = '\n'.join(self.content)
    node_list = []
    try:
        p = Popen([self.dot_path, '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        svg_data, errors = p.communicate(input=data.encode('utf8'))
        code = p.wait()
        if code:
            # Some error: report it through docutils instead of raising.
            document = self.state.document
            return [document.reporter.error(
                'Error processing graph: {0}'.format(errors), line=self.lineno)]
        if self.embed_graph:
            # SVG embedded in the HTML.
            # Bug fix: svg_data is bytes from Popen.communicate(); without
            # .decode() str.format() would embed "b'...'" in the page.
            if 'inline' in self.options:
                svg_data = '<span class="graphviz">{0}</span>'.format(svg_data.decode('utf8'))
            else:
                svg_data = '<p class="graphviz">{0}</p>'.format(svg_data.decode('utf8'))
        else:
            # External SVG file
            # TODO: there is no reason why this branch needs to be a raw
            # directive. It could generate regular docutils nodes and
            # be useful for any writer.
            makedirs(self.output_folder)
            # Content-addressed filename: identical graphs share a file.
            f_name = hashlib.md5(svg_data).hexdigest() + '.svg'
            img_path = self.graph_path + f_name
            f_path = os.path.join(self.output_folder, f_name)
            alt = self.options.get('alt', '')
            with open(f_path, 'wb+') as outf:
                outf.write(svg_data)
            self.state.document.settings.record_dependencies.add(f_path)
            if 'inline' in self.options:
                svg_data = '<span class="graphviz"><img src="{0}" alt="{1}"></span>'.format(img_path, alt)
            else:
                svg_data = '<p class="graphviz"><img src="{0}" alt="{1}"></p>'.format(img_path, alt)
        node_list.append(nodes.raw('', svg_data, format='html'))
        if 'caption' in self.options and 'inline' not in self.options:
            node_list.append(
                nodes.raw('', '<p class="caption">{0}</p>'.format(self.options['caption']), format='html'))
        return node_list
    except OSError:
        LOGGER.error("Can't execute 'dot'")
        raise
def scan_links(self, find_sources=False):
    """Check all rendered HTML tasks for broken links.

    Returns True when at least one broken link was found.
    """
    LOGGER.notice("Checking Links:")
    LOGGER.notice("===============")
    failure = False
    for task in os.popen('nikola list --all', 'r').readlines():
        task = task.strip()
        # Bug fix: a missing comma concatenated 'render_pages' and
        # 'render_site' into the single string 'render_pagesrender_site',
        # so neither kind of task was ever checked.
        if task.split(':')[0] in (
                'render_tags', 'render_archive',
                'render_galleries', 'render_indexes',
                'render_pages', 'render_site') and '.html' in task:
            if self.analyze(task, find_sources):
                failure = True
    if not failure:
        LOGGER.notice("All links checked.")
    return failure
def _execute(self, options={}, args=None):
    """Create a new site."""
    if not args:
        print("Usage: nikola init folder [options]")
        return False
    target = args[0]
    if target is None:
        print(self.usage)
        return
    if options and options.get('demo'):
        self.copy_sample_site(target)
        LOGGER.notice("A new site with example data has been created at "
                      "{0}.".format(target))
        LOGGER.notice("See README.txt in that folder for more information.")
    else:
        self.create_empty_site(target)
        LOGGER.notice('Created empty site at {0}.'.format(target))
    self.create_configuration(target)
def _execute(self, command, args):
    """Deploy the site: clean drafts/future posts from the output folder,
    run every DEPLOY_COMMANDS entry, and record the deploy timestamp."""
    # Get last succesful deploy date
    timestamp_path = os.path.join(self.site.config['CACHE_FOLDER'], 'lastdeploy')
    if self.site.config['COMMENT_SYSTEM_ID'] == 'nikolademo':
        LOGGER.warn("\nWARNING WARNING WARNING WARNING\n"
                    "You are deploying using the nikolademo Disqus account.\n"
                    "That means you will not be able to moderate the comments in your own site.\n"
                    "And is probably not what you want to do.\n"
                    "Think about it for 5 seconds, I'll wait :-)\n\n")
        time.sleep(5)
    deploy_drafts = self.site.config.get('DEPLOY_DRAFTS', True)
    deploy_future = self.site.config.get('DEPLOY_FUTURE', False)
    if not (deploy_drafts and deploy_future):
        # Remove drafts and future posts
        out_dir = self.site.config['OUTPUT_FOLDER']
        self.site.scan_posts()
        for post in self.site.timeline:
            if (not deploy_drafts and post.is_draft) or \
                    (not deploy_future and post.publish_later):
                remove_file(os.path.join(out_dir, post.destination_path()))
                remove_file(os.path.join(out_dir, post.source_path))
    for command in self.site.config['DEPLOY_COMMANDS']:
        try:
            with open(timestamp_path, 'rb') as inf:
                last_deploy = literal_eval(inf.read().strip())
        except Exception:
            last_deploy = datetime(1970, 1, 1)  # NOQA
        # Bug fix: the command was passed as a stray positional argument
        # and never appeared in the log output.
        LOGGER.notice("==> {0}".format(command))
        # Bug fix: subprocess.check_call() raises CalledProcessError on a
        # nonzero exit, so the original ``if ret != 0`` branch was dead
        # code.  Use call() so the explicit failure check actually runs.
        ret = subprocess.call(command, shell=True)
        if ret != 0:  # failed deployment
            raise Exception("Failed deployment")
    LOGGER.notice("Successful deployment")
    new_deploy = datetime.now()
    # Store timestamp of successful deployment
    with codecs.open(timestamp_path, 'wb+', 'utf8') as outf:
        outf.write(repr(new_deploy))
def tearDownClass():
    """Restore the default log level and mark the end of the test run."""
    sys.stdout.write('\n')
    # Re-enable INFO-level output that setUpClass suppressed.
    LOGGER.level = logging.INFO
    LOGGER.notice('--- END OF TESTS FOR publication_list')
def setUpClass():
    """Announce the test run and silence sub-WARNING log output."""
    LOGGER.notice('--- TESTS FOR publication_list')
    LOGGER.level = logging.WARNING
def tearDownClass():
    """Restore the logbook NOTICE level and mark the end of the test run."""
    sys.stdout.write('\n')
    # Re-enable NOTICE-level output that setUpClass suppressed.
    LOGGER.level = logbook.NOTICE
    LOGGER.notice('--- END OF TESTS FOR ItemScopeNestedCompact')
def setUpClass():
    """Announce the test run and silence sub-WARNING log output."""
    LOGGER.notice('--- TESTS FOR ItemScope')
    LOGGER.level = logbook.WARNING
def tearDownClass():
    """Restore the logbook NOTICE level and mark the end of the test run."""
    sys.stdout.write('\n')
    # Re-enable NOTICE-level output that setUpClass suppressed.
    LOGGER.level = logbook.NOTICE
    LOGGER.notice('--- END OF TESTS FOR tags')
def set_site(self, site):
    """ Map navstories config to nav_config[*] as TranslatableSettings """
    # Read NAVSTORIES_SUBMENU_INDENTION and store in self.navstories_submenu_indention
    if 'NAVSTORIES_SUBMENU_INDENTION' in site.config:
        self.navstories_submenu_indention = site.config['NAVSTORIES_SUBMENU_INDENTION']
    nav_config = {}
    for i in self.conf_vars:
        # Read config variables in a try...except in case a variable is missing
        try:
            nav_config[i] = utils.TranslatableSetting(i, site.config[i], site.config['TRANSLATIONS'])
        except KeyError:
            # Initialize to "empty" in case config variable i is missing
            nav_config[i] = utils.TranslatableSetting(i, self.conf_defaults[i](), site.config['TRANSLATIONS'])
    site.scan_posts()
    # NAVIGATION_LINKS is a TranslatableSetting, values is an actual dict
    for lang in site.config['NAVIGATION_LINKS'].values:
        # navstories config for lang
        nav_conf_lang = {}
        for i in self.conf_vars:
            nav_conf_lang[i] = nav_config[i](lang)
        # Which paths are navstories active for current lang?
        # - Must start and end with /
        paths = tuple(('/' + s.strip('/') + '/') for s in nav_conf_lang['NAVSTORIES_PATHS'])
        # Unsorted (raw) new entries, deleted as mapped to new
        new_raw = {}
        # Sorted entries as a list of top-level menu entries, later
        new = []
        # Map site pages to new_raw structure
        for p in site.pages:
            # Generate navpath (menu) based on permalink without language prefix
            # If TRANSLATION[DEFAULT_LANG] = '', then "permalink_nolang = p.permalink()" is ok
            permalink_nolang = re.sub(r'^/' + nav_conf_lang['TRANSLATIONS'].lstrip('./') + '/?', '/', p.permalink(lang))
            s_candidates = [s for s in paths if permalink_nolang.startswith(s)]
            if not s_candidates:
                continue
            # get longest path
            s = max(s_candidates, key=len)
            # Strip off the longest path in paths
            navpath = permalink_nolang[len(s):].strip('/').split('/')
            if len(navpath) == 0:
                # Should not happen that navpath is empty, but to prevent errors, and inform via a warning
                # NOTE(review): str.split('/') never returns an empty list,
                # so this branch looks unreachable -- confirm intent.
                LOGGER.warn(
                    "Page with permalink: '%s', title: '%s', not added to menu by navstories."
                    % (p.permalink(lang), p.title(lang)))
                continue
            if lang in p.translated_to and not p.meta('hidefromnav'):
                # Add entry
                if not navpath[0] in new_raw:
                    new_raw[navpath[0]] = []
                new_raw[navpath[0]].append(self.NavNode(navpath, p.permalink(lang), p.title(lang)))
        # Map from new_raw to new, sorting by NAVSTORIES_MAPPING
        for map_key, map_txt in nav_conf_lang['NAVSTORIES_MAPPING']:
            # Loop over all new_raw entries, checking if it matches map_key; if match: add it and delete from new_raw
            if map_key in new_raw:
                new.append([map_txt, new_raw[map_key]])
                del (new_raw[map_key])
        # Add remaing new_raw entries which didn't match any map_key
        new.extend([[None, new_raw[_]] for _ in sorted(new_raw)])
        # Map to tuple
        new_entries = self.map_to_menu(new)
        old_entries = site.config['NAVIGATION_LINKS'](lang)
        # Update NAVIGATION_LINKS with navstories dynamically generated entries and NAVIGATION_LINKS_POST_NAVSTORIES entries
        site.config['NAVIGATION_LINKS'].values[lang] = old_entries + new_entries + nav_conf_lang['NAVIGATION_LINKS_POST_NAVSTORIES']
    super(NavStories, self).set_site(site)
def setUpClass():
    """Announce the test run and silence sub-WARNING log output."""
    LOGGER.notice('--- TESTS FOR tags')
    LOGGER.level = logbook.WARNING
def tearDownClass():
    """Restore the logbook NOTICE level and mark the end of the test run."""
    sys.stdout.write('\n')
    # Re-enable NOTICE-level output that setUpClass suppressed.
    LOGGER.level = logbook.NOTICE
    LOGGER.notice('--- END OF TESTS FOR link_figure')
def setUpClass():
    """Announce the test run and silence sub-WARNING log output."""
    LOGGER.notice('--- TESTS FOR link_figure')
    LOGGER.level = logbook.WARNING
def setUpModule():
    """Announce the test run and silence sub-WARNING log output."""
    LOGGER.notice('--- TESTS FOR tags')
    LOGGER.level = logging.WARNING
def tearDownModule():
    """Restore the default log level and mark the end of the test run."""
    sys.stdout.write('\n')
    # Re-enable INFO-level output that setUpModule suppressed.
    LOGGER.level = logging.INFO
    LOGGER.notice('--- END OF TESTS FOR tags')
def setUpClass():
    """Announce the start of the helloworld test run."""
    LOGGER.notice('--- TESTS FOR helloworld')
def tearDownClass():
    """Print a trailing newline and mark the end of the test run."""
    sys.stdout.write('\n')
    LOGGER.notice('--- END OF TESTS FOR helloworld')