def preBuild(site):
    """Cactus pre-build hook: build every configured collection and derive
    the news / developer-news JSON feeds from the 'article' collection.

    Reads ``settings`` and ``collections`` from the site config, builds a
    ``Collection`` per entry (sorted and with navigation when configured),
    and caches results in the module-level ``COLLECTIONS`` and
    ``COLLECTIONS_JSON`` dicts.
    """
    settings = site.config.get('settings', {})
    collections = site.config.get('collections', {})

    global COLLECTIONS
    global COLLECTIONS_JSON

    for name, conf in collections.items():
        coll = Collection(conf['title'], conf['path'], conf['template'],
                          site.path, pages=site.pages(), config=site.config)
        order = conf.get('order')
        if order:
            coll.sort(**order)
        coll.create_navigation()
        COLLECTIONS[name] = coll
        COLLECTIONS_JSON[name] = coll.serialize()

    global NEWS_JSON
    # NOTE(review): assumes 'article' and 'releasenotes' collections are
    # always present in the site config -- confirm against config.json.
    NEWS_JSON = toDict(settings,
                       COLLECTIONS['article'].filter('news', key='category'))

    global DEVELOPER_NEWS_JSON
    # Release notes are folded into the article stream before filtering.
    COLLECTIONS['article'].pages = (COLLECTIONS['releasenotes'].pages +
                                    COLLECTIONS['article'].pages)
    # BUG FIX: the merged pages were re-sorted with the loop-leftover
    # ``order`` from the *last* iterated collection -- a TypeError when that
    # collection had no 'order', and the wrong sort key otherwise. Use the
    # article collection's own configured order instead.
    article_order = collections.get('article', {}).get('order')
    if article_order:
        COLLECTIONS['article'].sort(**article_order)
    DEVELOPER_NEWS_JSON = toDict(
        settings,
        COLLECTIONS['article'].filter('developernews', key='category'))
def preBuild(site):
    """Cactus pre-build hook: build every configured collection and derive
    the news / developer-news JSON feeds from the 'article' collection.

    Populates the module-level ``COLLECTIONS`` and ``COLLECTIONS_JSON``
    caches, then computes ``NEWS_JSON`` and ``DEVELOPER_NEWS_JSON``.
    """
    settings = site.config.get('settings', {})
    collections = site.config.get('collections', {})

    global COLLECTIONS
    global COLLECTIONS_JSON

    for name, conf in collections.items():
        coll = Collection(conf['title'], conf['path'], conf['template'],
                          site.path, pages=site.pages(), config=site.config)
        order = conf.get('order')
        if order:
            coll.sort(**order)
        coll.create_navigation()
        COLLECTIONS[name] = coll
        COLLECTIONS_JSON[name] = coll.serialize()

    global NEWS_JSON
    # NOTE(review): assumes 'article' and 'releasenotes' collections are
    # always present in the site config -- confirm against config.json.
    NEWS_JSON = toDict(settings,
                       COLLECTIONS['article'].filter('news', key='category'))

    global DEVELOPER_NEWS_JSON
    # Release notes are folded into the article stream before filtering.
    COLLECTIONS['article'].pages = (
        COLLECTIONS['releasenotes'].pages + COLLECTIONS['article'].pages)
    # BUG FIX: re-sorting used the loop-leftover ``order`` from the *last*
    # iterated collection -- a TypeError when that collection had no 'order',
    # and the wrong sort key otherwise. Use the article collection's own
    # configured order.
    article_order = collections.get('article', {}).get('order')
    if article_order:
        COLLECTIONS['article'].sort(**article_order)
    DEVELOPER_NEWS_JSON = toDict(
        settings,
        COLLECTIONS['article'].filter('developernews', key='category'))
def preBuild(site):
    """Cactus pre-build hook: parse every HTML post under ``DIR``, build a
    template context per post, chain prev/next links (newest first), and
    derive the news / developer-news JSON feeds.
    """
    global POSTS
    global NEWS_JSON
    global DEVELOPER_NEWS_JSON
    global CONFIG

    conf = os.path.join(site.path, 'config.json')
    # BUG FIX: the config file handle was leaked (bare ``open`` passed to
    # ``json.load`` with no close). A context manager closes it reliably.
    with open(conf, 'r') as fp:
        CONFIG = json.load(fp)

    # Build all the posts
    for page in site.pages():
        if not page.path.startswith(DIR):
            continue
        # Skip non html posts for obvious reasons
        if not page.path.endswith('.html'):
            continue

        # Parse headers and markdown body
        headers, body = parsePost(page)

        # Build a context for each post
        postContext = Context()
        postContext.update(headers)
        postContext['raw_body'] = body
        postContext['path'] = page.path
        postContext['date'] = parseDate(
            headers.get('date') or headers.get('created'))
        postContext['url'] = page.absolute_final_url
        # Comma-separated header values become stripped lists; the old
        # ``x and [...] or []`` idiom is replaced by a conditional expression.
        tags = headers.get('tags')
        postContext['tags'] = (
            [t.strip() for t in tags.split(',')] if tags else [])
        category = headers.get('category')
        postContext['category'] = (
            [c.strip() for c in category.split(',')] if category else [])
        POSTS.append(postContext)

    # Sort the posts by date, newest first.
    POSTS = sorted(POSTS, key=lambda x: x['date'], reverse=True)

    # Link neighbours. PERF FIX: ``i + 1 in xrange(...)`` was a linear
    # membership scan per post (quadratic overall); plain bounds checks
    # are O(1) and work on both Python 2 and 3.
    last = len(POSTS) - 1
    for i, post in enumerate(POSTS):
        if i < last:
            post['prev_post'] = POSTS[i + 1]
        if i > 0:
            post['next_post'] = POSTS[i - 1]

    NEWS_JSON = toDict(CONFIG, filterPosts(POSTS, 'news'))
    DEVELOPER_NEWS_JSON = toDict(CONFIG, filterPosts(POSTS, 'developernews'))
def test_toDict(self):
    """utils.toDict turns post contexts into serializable dicts: empty or
    missing inputs yield [], excerpts are truncated and stripped of
    markdown, and permalinks gain the configured site prefix."""
    post = {
        'title': 'some-title',
        'date': 'a-date',
        'category': 'category',
        'url': '/post/1',
        'raw_body': 'short body',
        'author': 'someone',
        'tags': ['tags', 'tags2'],
        'topics': ['crate'],
    }
    expected = {
        "id": "4034c5bce7dad3b40247b6c812b0c93c",
        "title": "some-title",
        "date": "a-date",
        'tags': ['tags', 'tags2'],
        'topics': ['crate'],
        "category": "category",
        "permalink": "/post/1",
        "content": '',
        "excerpt": "short body",
        "author": "someone"
    }
    long_body = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam a tellus euismod, congue est nec, egestas diam. Phasellus sed sollicitudin lacus. Etiam rhoncus, nulla a convallis"
    markdown_body = "# HELLO WORLD"
    settings = {"site": "https://crate.io"}

    # degenerate inputs all collapse to an empty list
    for args in (({}, []), ({}, None), (None, None)):
        self.assertEqual(utils.toDict(*args), [])

    # a single post serializes to the expected dict
    self.assertEqual(utils.toDict({}, [post]), [expected])

    # duplicates are not deduplicated
    self.assertEqual(utils.toDict({}, [post] * 3), [expected] * 3)

    # long bodies are truncated by one word in the excerpt
    truncated = utils.toDict({}, [dict(post, raw_body=long_body)])[0]
    self.assertEqual(len(truncated["excerpt"].split(" ")),
                     len(long_body.split(" ")) - 1)

    # markdown bodies are rendered, then stripped of tags
    stripped = utils.toDict({}, [dict(post, raw_body=markdown_body)])[0]
    self.assertEqual(stripped["excerpt"], "HELLO WORLD")

    # the settings 'site' key is prepended to permalinks
    linked = utils.toDict(settings, [post])[0]
    self.assertEqual(linked["permalink"],
                     '{0}{1}'.format(settings["site"], post["url"]))
def preBuild(site):
    """Cactus pre-build hook: parse every HTML post under ``DIR``, build a
    template context per post, chain prev/next links (newest first), and
    derive the news / developer-news JSON feeds.
    """
    global POSTS
    global NEWS_JSON
    global DEVELOPER_NEWS_JSON
    global CONFIG

    conf = os.path.join(site.path, 'config.json')
    # BUG FIX: the config file handle was leaked (bare ``open`` passed to
    # ``json.load`` with no close). A context manager closes it reliably.
    with open(conf, 'r') as fp:
        CONFIG = json.load(fp)

    # Build all the posts
    for page in site.pages():
        if not page.path.startswith(DIR):
            continue
        # Skip non html posts for obvious reasons
        if not page.path.endswith('.html'):
            continue

        # Parse headers and markdown body
        headers, body = parsePost(page)

        # Build a context for each post
        postContext = Context()
        postContext.update(headers)
        postContext['raw_body'] = body
        postContext['path'] = page.path
        postContext['date'] = parseDate(
            headers.get('date') or headers.get('created'))
        postContext['url'] = page.absolute_final_url
        # Comma-separated header values become stripped lists; the old
        # ``x and [...] or []`` idiom is replaced by a conditional expression.
        tags = headers.get('tags')
        postContext['tags'] = (
            [t.strip() for t in tags.split(',')] if tags else [])
        category = headers.get('category')
        postContext['category'] = (
            [c.strip() for c in category.split(',')] if category else [])
        POSTS.append(postContext)

    # Sort the posts by date, newest first.
    POSTS = sorted(POSTS, key=lambda x: x['date'], reverse=True)

    # Link neighbours. PERF FIX: ``i + 1 in xrange(...)`` was a linear
    # membership scan per post (quadratic overall); plain bounds checks
    # are O(1) and work on both Python 2 and 3.
    last = len(POSTS) - 1
    for i, post in enumerate(POSTS):
        if i < last:
            post['prev_post'] = POSTS[i + 1]
        if i > 0:
            post['next_post'] = POSTS[i - 1]

    NEWS_JSON = toDict(CONFIG, filterPosts(POSTS, 'news'))
    DEVELOPER_NEWS_JSON = toDict(CONFIG, filterPosts(POSTS, 'developernews'))
def test_toDict(self):
    """Check utils.toDict: empty/None inputs give [], a post serializes to
    the expected dict, excerpts are truncated and markdown-stripped, and
    permalinks are prefixed with the configured site URL."""
    sample = {
        "title": "some-title",
        "date": "a-date",
        "category": "category",
        "url": "/post/1",
        "raw_body": "short body",
        "author": "someone",
        "tags": ["tags", "tags2"],
        "topics": ["crate"],
    }
    want = {
        "id": "4034c5bce7dad3b40247b6c812b0c93c",
        "title": "some-title",
        "date": "a-date",
        "tags": ["tags", "tags2"],
        "topics": ["crate"],
        "category": "category",
        "permalink": "/post/1",
        "content": "",
        "excerpt": "short body",
        "author": "someone",
    }
    wordy = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam a tellus euismod, congue est nec, egestas diam. Phasellus sed sollicitudin lacus. Etiam rhoncus, nulla a convallis"
    markdown = "# HELLO WORLD"
    cfg = {"site": "https://crate.io"}

    # degenerate inputs
    self.assertEqual(utils.toDict({}, []), [])
    self.assertEqual(utils.toDict({}, None), [])
    self.assertEqual(utils.toDict(None, None), [])

    # one post -> one expected dict; duplicates pass through unchanged
    self.assertEqual(utils.toDict({}, [sample]), [want])
    self.assertEqual(utils.toDict({}, [sample] * 3), [want] * 3)

    # long bodies lose one word when excerpted
    shortened = utils.toDict({}, [dict(sample, raw_body=wordy)])[0]
    self.assertEqual(len(shortened["excerpt"].split(" ")),
                     len(wordy.split(" ")) - 1)

    # markdown excerpts are stripped of tags
    plain = utils.toDict({}, [dict(sample, raw_body=markdown)])[0]
    self.assertEqual(plain["excerpt"], "HELLO WORLD")

    # site prefix is applied to permalinks
    prefixed = utils.toDict(cfg, [sample])[0]
    self.assertEqual(prefixed["permalink"],
                     "{0}{1}".format(cfg["site"], sample["url"]))
def serialize(self):
    """Return the JSON-ready representation of this collection's pages,
    built by ``toDict`` from the configured settings."""
    settings = self.config.get('settings', {})
    return toDict(settings, self.pages)