def get(self, old_slug):
    # There are still cached pages from Google hitting the server
    # that don't match the new routing scheme.
    #
    # To compensate, the "OldBlogHandler" parses the previous slug
    # format and does a broad, generic query with the keywords (and
    # title). Since old content is being ported 1-to-1, if we have
    # ANY search results, we want to return that page via a
    # permanent redirect.
    #
    # Assumes: from whoosh.qparser import QueryParser; clean_results
    # and document_slug are helpers defined elsewhere in the project.
    pieces = list(map(str.strip, old_slug.split('-')))
    idx = self.meta.search_index
    with idx.searcher() as searcher:
        q = QueryParser('title', idx.schema).parse(' OR '.join(pieces))
        results = searcher.search(q, limit=1)
        results = clean_results(idx, results)
        if results.get('count', 0) > 0:
            # rebuild the URL, and redirect PERMANENTLY
            # to the correct endpoint
            reslug = document_slug(results.results[0])
            self.redirect(reslug, permanent=True)
        else:
            # we couldn't find the "missing" old article in the new
            # data store given the slug, so return some kind of
            # "Missing" page.
            self.write('That content appears to be missing.')
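The interesting part of the handler is the fallback query itself. Here is a minimal, self-contained sketch of the same technique against a throwaway Whoosh index; the schema, documents, and slug below are illustrative assumptions, not the blog's real data:

# Sketch: turn an old-style slug into a broad OR query over titles.
from whoosh.fields import Schema, TEXT, ID
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import QueryParser

schema = Schema(title=TEXT(stored=True), slug=ID(stored=True))
ix = RamStorage().create_index(schema)

writer = ix.writer()
writer.add_document(title=u"Growing Tomatoes in Winter",    # made-up post
                    slug=u"/blog/growing-tomatoes-in-winter")
writer.commit()

# An old-style slug, split on hyphens, becomes loose keywords.
old_slug = "2013-01-growing-tomatoes"
pieces = [p.strip() for p in old_slug.split('-') if p.strip()]

with ix.searcher() as searcher:
    q = QueryParser('title', ix.schema).parse(' OR '.join(pieces))
    results = searcher.search(q, limit=1)
    if len(results) > 0:
        print(results[0]['slug'])    # would become the redirect target
    else:
        print('no match; show a "missing" page')

The OR query is deliberately loose: any keyword that survived the slug change is enough to surface the ported article as the top hit.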
from pyatom import AtomFeed  # assumed import; the generator tuple below points at PyAtom

# logger, DotDict, fn_markdown, and document_slug are project helpers
# defined elsewhere.
def generate_feed(posts, subtitle='', url="https://sowingseasons.com/feed.atom"):
    logger.info('generate_feed(%s)' % url)
    feed = AtomFeed(
        title="SowingSeasons",
        title_type="text",
        subtitle="takes awhile to grow anything. %s" % subtitle,
        subtitle_type="text",
        feed_url=url,
        url="https://sowingseasons.com",
        author="Blake VandeMerwe",
        icon="/static/img/ico_black.png",
        logo="/static/img/logo.png",
        rights="MIT LICENSE",
        rights_type="text",
        generator=("PyAtom", "https://github.com/sramana/pyatom", "1.4")
    )
    for post in posts.results:
        post = DotDict(post)
        feed.add(
            title=post.title,
            title_type="text",
            content=fn_markdown(post.content),
            content_type="html",
            summary=post.summary,
            summary_type="text",
            url='https://sowingseasons.com' + document_slug(post),
            updated=post.modified,
            author="Blake VandeMerwe",
            published=post.modified,
            rights="MIT LICENSE",
            rights_type="text"
        )
    return feed.to_string()
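For context, here is a sketch of how generate_feed might be served over HTTP. It assumes the Tornado-style RequestHandler that the redirect code above implies; FeedHandler, self.meta, and get_recent_posts are hypothetical names, not from the original source:

import tornado.web

class FeedHandler(tornado.web.RequestHandler):
    def get(self):
        # hypothetical: fetch the same search-result object the
        # index page already renders from
        posts = self.meta.get_recent_posts()
        self.set_header('Content-Type', 'application/atom+xml; charset=utf-8')
        self.write(generate_feed(posts))

Setting the application/atom+xml content type matters here: feed readers use it to recognize the response as an Atom document rather than plain XML or HTML.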