def generate_sitemap(app):
    """Write a sitemap for the blog when a base URL is configured.

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the
    ``Sitemap`` call does the actual writing as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    base_url = blog.blog_baseurl
    if not base_url:
        # BUG FIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); a bare return is the
        # correct way to end the generator early.
        return
    Sitemap(app=app, path=app.builder.outdir, base_url=base_url)
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_atom_feeds(app):
    """Write Atom feeds for the blog and, when ``blog_feed_archives`` is
    enabled, for each archive collection (authors, locations, languages,
    categories, archives, tags).

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the
    feed files are written as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        # BUG FIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); use a bare return.
        return
    try:
        from werkzeug.contrib.atom import AtomFeed
    except ImportError:
        app.warn("werkzeug is not found, continue without atom feeds support.")
        return
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
    # Each entry: (posts, pagename, output path, feed title, public feed URL).
    feeds = [(blog.posts,
              blog.blog_path,
              feed_path,
              blog.blog_title,
              os_path_join(url, blog.blog_path, 'atom.xml'))]
    if blog.blog_feed_archives:
        for header, catalog in [
            (_('Posts by'), blog.author),
            (_('Posts from'), blog.location),
            (_('Posts in'), blog.language),
            (_('Posts in'), blog.category),
            (_('Posted in'), blog.archive),
            (_('Posts tagged'), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append(
                    (coll,
                     coll.path,
                     os.path.join(folder, 'atom.xml'),
                     blog.blog_title + u' - ' + header + u' ' + text_type(coll),
                     os_path_join(url, coll.path, 'atom.xml')))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(feed_title,
                        title_type='text',
                        url=url,
                        feed_url=feed_url,
                        subtitle=blog.blog_feed_subtitle,
                        generator=('ABlog', 'http://ablog.readthedocs.org',
                                   ablog.__version__))
        for i, post in enumerate(feed_posts):
            # feed_length == 0/None means "no limit".
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(
                url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += '#' + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed.add(post.title,
                     content=content,
                     title_type='text',
                     content_type='html',
                     author=', '.join(a.name for a in post.author),
                     url=post_url,
                     id=post_url,
                     updated=post.update,
                     published=post.date)
        with open(feed_path, 'w') as out:
            feed_str = feed.to_string()
            try:
                # Python 2: plain file objects want encoded bytes.
                out.write(feed_str.encode('utf-8'))
            except TypeError:
                # Python 3: text-mode files want str.
                out.write(feed_str)
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_archive_pages(app):
    """Yield (docname, context, template) triples for all archive pages:
    redirects, catalogs, collections, the all-posts page, and drafts."""
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    # One redirect stub per declared redirect of every post.
    for post in blog.posts:
        for target in post.redirect:
            yield (target,
                   {'redirect': post.docname, 'post': post},
                   'redirect.html')
    atom_feed = bool(blog.blog_baseurl)
    feed_archives = blog.blog_feed_archives
    blog_path = blog.blog_path
    catalogs = [
        (_('Authors'), _('Posts by'), blog.author),
        (_('Locations'), _('Posts from'), blog.location),
        (_('Languages'), _('Posts in'), blog.language),
        (_('Categories'), _('Posts in'), blog.category),
        (_('All posts'), _('Posted in'), blog.archive),
        (_('Tags'), _('Posts tagged'), blog.tags),
    ]
    for title, header, catalog in catalogs:
        if not catalog:
            continue
        # Overview page for the catalog itself.
        yield (catalog.docname,
               {'parents': [],
                'title': title,
                'header': header,
                'catalog': catalog,
                'summary': True},
               'catalog.html')
        # One page per non-empty collection inside the catalog.
        for coll in catalog:
            if not coll:
                continue
            label = u'{} {}'.format(header, coll)
            yield (coll.docname,
                   {'parents': [],
                    'title': label,
                    'header': header,
                    'collection': coll,
                    'summary': True,
                    'feed_path': coll.path if feed_archives else blog_path,
                    'archive_feed': atom_feed and feed_archives,
                    'feed_title': label},
                   'collection.html')
    # Landing page listing every post.
    yield (blog.posts.docname,
           {'parents': [],
            'title': _('All Posts'),
            'header': _('All'),
            'collection': blog.posts,
            'summary': True,
            'atom_feed': atom_feed,
            'feed_path': blog.blog_path},
           'collection.html')
    # Unpublished drafts get their own page.
    yield (blog.drafts.docname,
           {'parents': [],
            'title': _('Drafts'),
            'collection': blog.drafts,
            'summary': True},
           'collection.html')
def generate_archive_pages(app):
    """
    Yield (docname, context, template) triples for all archive pages:
    redirects, catalogs, collections, the all-posts page, and drafts.

    Generated catalog/collection pages are skipped when a real source
    document with the same docname already exists in the project.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    # One redirect stub per declared redirect of every post.
    for post in blog.posts:
        for target in post.redirect:
            yield (target, {"redirect": post.docname, "post": post}, "redirect.html")
    found_docs = app.env.found_docs
    atom_feed = bool(blog.blog_baseurl)
    feed_archives = blog.blog_feed_archives
    blog_path = blog.blog_path
    catalogs = (
        (_("Authors"), _("Posts by"), blog.author),
        (_("Locations"), _("Posts from"), blog.location),
        (_("Languages"), _("Posts in"), blog.language),
        (_("Categories"), _("Posts in"), blog.category),
        (_("All posts"), _("Posted in"), blog.archive),
        (_("Tags"), _("Posts tagged"), blog.tags),
    )
    for title, header, catalog in catalogs:
        if not catalog:
            continue
        # A user-authored document with this name takes precedence.
        if catalog.docname not in found_docs:
            yield (
                catalog.docname,
                {
                    "parents": [],
                    "title": title,
                    "header": header,
                    "catalog": catalog,
                    "summary": True,
                },
                "catalog.html",
            )
        for coll in catalog:
            if not coll:
                continue
            label = f"{header} {coll}"
            if coll.docname not in found_docs:
                yield (
                    coll.docname,
                    {
                        "parents": [],
                        "title": label,
                        "header": header,
                        "collection": coll,
                        "summary": True,
                        "feed_path": coll.path if feed_archives else blog_path,
                        "archive_feed": atom_feed and feed_archives,
                        "feed_title": label,
                    },
                    "collection.html",
                )
    # Landing page listing every post.
    yield (
        blog.posts.docname,
        {
            "parents": [],
            "title": _("All Posts"),
            "header": _("All"),
            "collection": blog.posts,
            "summary": True,
            "atom_feed": atom_feed,
            "feed_path": blog.blog_path,
        },
        "collection.html",
    )
    # Unpublished drafts get their own page.
    yield (
        blog.drafts.docname,
        {"parents": [], "title": _("Drafts"), "collection": blog.drafts, "summary": True},
        "collection.html",
    )
def generate_atom_feeds(app):
    """Write Atom feeds for the blog and, when ``blog_feed_archives`` is
    enabled, for each archive collection.

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the
    feed files are written as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        # BUG FIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); use a bare return.
        return
    try:
        from werkzeug.contrib.atom import AtomFeed
    except ImportError:
        app.warn("werkzeug is not found, continue without atom feeds support.")
        return
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
    # Each entry: (posts, pagename, output path, feed title, public feed URL).
    feeds = [(blog.posts,
              blog.blog_path,
              feed_path,
              blog.blog_title,
              os_path_join(url, blog.blog_path, 'atom.xml'))]
    if blog.blog_feed_archives:
        for header, catalog in [
                (_('Posts by'), blog.author),
                (_('Posts from'), blog.location),
                (_('Posts in'), blog.language),
                (_('Posts in'), blog.category),
                (_('Posted in'), blog.archive),
                (_('Posts tagged'), blog.tags)]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append((coll,
                              coll.path,
                              os.path.join(folder, 'atom.xml'),
                              blog.blog_title + u' - ' + header +
                              u' ' + text_type(coll),
                              os_path_join(url, coll.path, 'atom.xml')))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(feed_title,
                        title_type='text',
                        url=url,
                        feed_url=feed_url,
                        subtitle=blog.blog_feed_subtitle,
                        generator=('ABlog', 'http://ablog.readthedocs.org',
                                   ablog.__version__))
        for i, post in enumerate(feed_posts):
            # feed_length == 0/None means "no limit".
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(
                url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += '#' + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed.add(post.title,
                     content=content,
                     title_type='text',
                     content_type='html',
                     author=', '.join(a.name for a in post.author),
                     url=post_url,
                     id=post_url,
                     updated=post.update,
                     published=post.date)
        with open(feed_path, 'w') as out:
            feed_str = feed.to_string()
            try:
                # Python 2: plain file objects want encoded bytes.
                out.write(feed_str.encode('utf-8'))
            except TypeError:
                # Python 3: text-mode files want str.
                out.write(feed_str)
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_archive_pages(app):
    """Yield (docname, context, template) triples for all archive pages:
    redirects, catalogs, collections, the all-posts page, and drafts."""
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    # One redirect stub per declared redirect of every post.
    for post in blog.posts:
        for target in post.redirect:
            yield (target, dict(redirect=post.docname, post=post),
                   'redirect.html')
    atom_feed = bool(blog.blog_baseurl)
    feed_archives = blog.blog_feed_archives
    blog_path = blog.blog_path
    for title, header, catalog in (
            (_('Authors'), _('Posts by'), blog.author),
            (_('Locations'), _('Posts from'), blog.location),
            (_('Languages'), _('Posts in'), blog.language),
            (_('Categories'), _('Posts in'), blog.category),
            (_('All posts'), _('Posted in'), blog.archive),
            (_('Tags'), _('Posts tagged'), blog.tags)):
        if not catalog:
            continue
        # Overview page for the catalog itself.
        yield (catalog.docname,
               dict(parents=[], title=title, header=header,
                    catalog=catalog, summary=True),
               'catalog.html')
        # One page per non-empty collection inside the catalog.
        for coll in catalog:
            if not coll:
                continue
            label = u'{0} {1}'.format(header, coll)
            yield (coll.docname,
                   dict(parents=[], title=label, header=header,
                        collection=coll, summary=True,
                        feed_path=coll.path if feed_archives else blog_path,
                        archive_feed=atom_feed and feed_archives,
                        feed_title=label),
                   'collection.html')
    # Landing page listing every post.
    yield (blog.posts.docname,
           dict(parents=[], title=_('All Posts'), header=_('All'),
                collection=blog.posts, summary=True,
                atom_feed=atom_feed, feed_path=blog.blog_path),
           'collection.html')
    # Unpublished drafts get their own page.
    yield (blog.drafts.docname,
           dict(parents=[], title=_('Drafts'), collection=blog.drafts,
                summary=True),
           'collection.html')
def generate_atom_feeds(app):
    """
    Write Atom feeds — one per configured feed template root — for the blog
    and, when ``blog_feed_archives`` is enabled, for each archive collection.

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the feed
    files are written as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        return
    # Each entry: (posts, pagename, output path, feed title, public feed
    # URL, per-element Jinja templates).
    feeds = [
        (
            blog.posts,
            blog.blog_path,
            os.path.join(app.builder.outdir, blog.blog_path, feed_root + ".xml"),
            blog.blog_title,
            os_path_join(url, blog.blog_path, feed_root + ".xml"),
            feed_templates,
        )
        for feed_root, feed_templates in blog.blog_feed_templates.items()
    ]
    if blog.blog_feed_archives:
        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                for feed_root, feed_templates in blog.blog_feed_templates.items():
                    feeds.append(
                        (
                            coll,
                            coll.path,
                            os.path.join(folder, feed_root + ".xml"),
                            blog.blog_title + " - " + header + " " + text_type(coll),
                            os_path_join(url, coll.path, feed_root + ".xml"),
                            feed_templates,
                        )
                    )
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url, feed_templates in feeds:
        feed = FeedGenerator()
        feed.id(blog.blog_baseurl)
        feed.title(feed_title)
        feed.link(href=url)
        feed.subtitle(blog.blog_feed_subtitle)
        feed.link(href=feed_url, rel="self")
        feed.language(app.config.language)
        feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org/")
        for i, post in enumerate(feed_posts):
            # feed_length == 0/None means "no limit".
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += "#" + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed_entry = feed.add_entry()
            feed_entry.id(post_url)
            feed_entry.link(href=post_url)
            # BUG FIX: `{"name": a.name for a in post.author}` is a dict
            # comprehension with a constant key, so only the LAST author
            # survived; pass one dict per author so all are recorded.
            feed_entry.author([{"name": author.name} for author in post.author])
            feed_entry.pubDate(post.date.astimezone())
            # NOTE(review): assumes post.update is always a datetime — confirm.
            feed_entry.updated(post.update.astimezone())
            for tag in post.tags:
                feed_entry.category(
                    dict(
                        term=tag.name.strip().replace(" ", ""),
                        label=tag.label,
                    )
                )
            # Entry values that support templates
            title = post.title
            summary = "".join(paragraph.astext() for paragraph in post.excerpt)
            template_values = {}
            for element in ("title", "summary", "content"):
                if element in feed_templates:
                    template_values[element] = jinja2.Template(
                        feed_templates[element]
                    ).render(**locals())
            feed_entry.title(template_values.get("title", title))
            summary = template_values.get("summary", summary)
            if summary:
                feed_entry.summary(summary)
            content = template_values.get("content", content)
            if content:
                feed_entry.content(content=content, type="html")
        parent_dir = os.path.dirname(feed_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
        with open(feed_path, "w", encoding="utf-8") as out:
            feed_str = feed.atom_str(pretty=True)
            out.write(feed_str.decode())
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_atom_feeds(app):
    """
    Write Atom feeds for the blog and, when ``blog_feed_archives`` is
    enabled, for each archive collection.

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the feed
    files are written as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        return
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, "atom.xml")
    # Each entry: (posts, pagename, output path, feed title, public feed URL).
    feeds = [(
        blog.posts,
        blog.blog_path,
        feed_path,
        blog.blog_title,
        os_path_join(url, blog.blog_path, "atom.xml"),
    )]
    if blog.blog_feed_archives:
        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append((
                    coll,
                    coll.path,
                    os.path.join(folder, "atom.xml"),
                    blog.blog_title + " - " + header + " " + text_type(coll),
                    os_path_join(url, coll.path, "atom.xml"),
                ))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = AtomFeed(
            feed_title,
            title_type="text",
            url=url,
            feed_url=feed_url,
            subtitle=blog.blog_feed_subtitle,
            generator=("ABlog", "https://ablog.readthedocs.org", ablog.__version__),
        )
        for i, post in enumerate(feed_posts):
            # feed_length == 0/None means "no limit".
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += "#" + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed.add(
                post.title,
                content=content,
                title_type="text",
                content_type="html",
                author=", ".join(a.name for a in post.author),
                url=post_url,
                id=post_url,
                updated=post.update,
                published=post.date,
            )
        parent_dir = os.path.dirname(feed_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
        # CLEANUP: the file is opened in text mode with an explicit encoding
        # (a Python-3-only call signature), so the old Python 2
        # `write(encode('utf-8'))` / `except TypeError` fallback could never
        # take the bytes path — write the string directly.
        with open(feed_path, "w", encoding="utf-8") as out:
            out.write(feed.to_string())
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
def generate_atom_feeds(app):
    """
    Write Atom feeds for the blog and, when ``blog_feed_archives`` is
    enabled, for each archive collection.

    Registered for Sphinx's ``html-collect-pages``; yields nothing, the feed
    files are written as a side effect.
    """
    if not ablog.builder_support(app):
        return
    blog = Blog(app)
    url = blog.blog_baseurl
    if not url:
        return
    feed_path = os.path.join(app.builder.outdir, blog.blog_path, "atom.xml")
    # Each entry: (posts, pagename, output path, feed title, public feed URL).
    feeds = [(
        blog.posts,
        blog.blog_path,
        feed_path,
        blog.blog_title,
        os_path_join(url, blog.blog_path, "atom.xml"),
    )]
    if blog.blog_feed_archives:
        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:
            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)
                feeds.append((
                    coll,
                    coll.path,
                    os.path.join(folder, "atom.xml"),
                    blog.blog_title + " - " + header + " " + text_type(coll),
                    os_path_join(url, coll.path, "atom.xml"),
                ))
    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext
    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
        feed = FeedGenerator()
        # BUG FIX: the feed id was the placeholder URL from the feedgen docs
        # ("http://lernfunk.de/media/654321"); use the blog's base URL.
        feed.id(blog.blog_baseurl)
        feed.title(feed_title)
        feed.link(href=url)
        feed.subtitle(blog.blog_feed_subtitle)
        # rel="self" marks the feed's own URL, distinct from the site link.
        feed.link(href=feed_url, rel="self")
        # BUG FIX: language was hard-coded to "en"; honour the project's
        # configured language instead.
        feed.language(app.config.language)
        feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org")
        for i, post in enumerate(feed_posts):
            # feed_length == 0/None means "no limit".
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += "#" + post.section
            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)
            feed_entry = feed.add_entry()
            feed_entry.id(post_url)
            feed_entry.title(post.title)
            feed_entry.link(href=post_url)
            # BUG FIX: `{"name": a.name for a in post.author}` is a dict
            # comprehension with a constant key, so only the LAST author
            # survived; pass one dict per author so all are recorded.
            feed_entry.author([{"name": author.name} for author in post.author])
            feed_entry.pubDate(post.date.astimezone())
            # NOTE(review): assumes post.update is always a datetime — confirm.
            feed_entry.updated(post.update.astimezone())
            # BUG FIX: feedgen's FeedEntry.content() fails when called with
            # content=None but type set, which happened for titles-only
            # feeds; skip the call when there is no content.
            if content:
                feed_entry.content(content=content, type="html")
        parent_dir = os.path.dirname(feed_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
        with open(feed_path, "w", encoding="utf-8") as out:
            feed_str = feed.atom_str(pretty=True)
            out.write(feed_str.decode())
    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield