def get_generator(self):
    """Return a PodcastFeedGenerator whose context holds two sample
    podcast articles, with generate_context() already run."""
    category = Category("misc", self.settings)
    # Two identical podcast entries exercise multi-item feed output.
    podcast_articles = [
        get_article(
            "podcast title",
            "podcast cocntent",
            podcast="http://example.com/audio/test.mp3",
            category=category,
            date=datetime.datetime.now(),
            length="120",
            duration="120",
        )
        for _ in range(2)
    ]
    context = get_context(**self.settings)
    context["articles"] = podcast_articles
    generator = PodcastFeedGenerator(
        context=context,
        settings=self.settings,
        path=self.temp_content,
        theme="",
        output_path=self.temp_output,
    )
    generator.generate_context()
    return generator
def _parse_metadata(self, meta):
    """Parse and sanitize metadata"""
    _DEL = object()  # sentinel: "omit this key from the result"
    # Per-key sanitizers; each takes (raw value, settings) and returns
    # either the parsed value or the _DEL sentinel to drop the key.
    handlers = {
        'tags': lambda x, y: [Tag(t, y) for t in self._to_list(x)] or _DEL,
        'date': lambda x, y: get_date(x) if x else _DEL,
        'modified': lambda x, y: get_date(x) if x else _DEL,
        'category': lambda x, y: Category(x, y) if x else _DEL,
        'author': lambda x, y: Author(x, y) if x else _DEL,
        'authors': lambda x, y: [Author(a, y)
                                 for a in self._to_list(x)] or _DEL,
        'default': lambda x, y: x
    }
    out = {}
    for key, raw in meta.items():
        key = key.lower()
        if key in self.settings['FORMATTED_FIELDS']:
            # Formatted fields are rendered through the Markdown converter.
            self._md.reset()
            parsed = self._md.convert("\n".join(self._to_list(raw)))
        else:
            handler = handlers.get(key, handlers["default"])
            parsed = handler(raw, self.settings)
        if parsed is not _DEL:
            out[key] = parsed
    return out
def generate_context(self):
    """Add the articles into the shared context.

    Reads every file under ARTICLE_DIR (minus ARTICLE_EXCLUDES),
    fills in default category/date metadata, and sorts each valid
    article into published articles, drafts, or a skip warning.
    """
    article_path = os.path.normpath(  # we have to remove trailing slashes
        os.path.join(self.path, self.settings['ARTICLE_DIR'])
    )
    all_articles = []
    for f in self.get_files(
            article_path, exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            signals.article_generate_preread.send(self)
            content, metadata = read_file(f, settings=self.settings)
        # FIX: was `except Exception, e` — Python-2-only syntax removed in
        # Python 3; `as` form matches the other generate_context in this file.
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata:
            if (self.settings['USE_FOLDER_AS_CATEGORY']
                    and os.path.dirname(f) != article_path):
                # if the article is in a subdirectory
                category = os.path.basename(os.path.dirname(f))\
                    .decode('utf-8')
            else:
                # if the article is not in a subdirectory
                category = self.settings['DEFAULT_CATEGORY']

            if category != '':
                metadata['category'] = Category(category, self.settings)

        if 'date' not in metadata and self.settings.get('DEFAULT_DATE'):
            if self.settings['DEFAULT_DATE'] == 'fs':
                # 'fs' setting: fall back to the file's ctime on disk.
                metadata['date'] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)
            else:
                metadata['date'] = datetime.datetime(
                    *self.settings['DEFAULT_DATE'])

        signals.article_generate_context.send(self, metadata=metadata)
        article = Article(content, metadata, settings=self.settings,
                          filename=f, context=self.context)
        if not is_valid_content(article, f):
            continue

        self.add_filename(article)

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            # NOTE(review): unicode.encode is Python-2-only; assumes
            # article.status is a unicode object — confirm when porting.
            logger.warning(u"Unknown status %s for file %s, skipping it."
                           % (repr(unicode.encode(article.status, 'utf-8')),
                              repr(f)))
def test_slugify_category_with_dots(self):
    """A category substitution may introduce dots into the article URL."""
    settings = get_settings()
    settings['CATEGORY_SUBSTITUTIONS'] = [('Fedora QA', 'fedora.qa', True)]
    settings['ARTICLE_URL'] = '{category}/{slug}/'
    kwargs = self._copy_page_kwargs()
    kwargs['settings'] = settings
    kwargs['metadata']['category'] = Category('Fedora QA', settings)
    kwargs['metadata']['title'] = 'This Week in Fedora QA'
    self.assertEqual(
        Article(**kwargs).url, 'fedora.qa/this-week-in-fedora-qa/')
def test_slugify_category_author(self):
    """Author and category are slugified in both url and save_as."""
    settings = get_settings()
    settings['SLUG_SUBSTITUTIONS'] = [('C#', 'csharp')]
    settings['ARTICLE_URL'] = '{author}/{category}/{slug}/'
    settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html'
    kwargs = self._copy_page_kwargs()
    kwargs['settings'] = settings
    kwargs['metadata']['author'] = Author("O'Brien", settings)
    kwargs['metadata']['category'] = Category('C# & stuff', settings)
    kwargs['metadata']['title'] = 'fnord'
    article = Article(**kwargs)
    self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/')
    self.assertEqual(
        article.save_as, 'obrien/csharp-stuff/fnord/index.html')
def create_articles(self, num):
    """Append `num` sample articles to self.articles, oldest first
    (dates run from `num` days ago up to yesterday)."""
    now = datetime.now()
    for offset in range(num, 0, -1):
        label = "{:02n}".format(offset)
        metadata = {
            "Title": "Title " + label,
            "Date": now + timedelta(days=-offset),
            "Category": Category("Cat", self.context),
            "Tags": [
                "TagBecomesCategoryInFeed",
                "OtherTag",
                "Tag " + label,
            ],
            "Author": Author("Author " + str(offset // 10), self.context),
        }
        self.articles.append(Article("Some content", metadata=metadata))
def replacer(siteurl, m):
    """Resolve one intra-site link match and rebuild its markup.

    `m` carries groups 'what' ({filename}/{attach}/{category}/{tag}/
    {index}/{author}), 'value', 'path', 'markup' and 'quote'; `content`
    is taken from the enclosing scope. Returns the markup with the url
    replaced, preserving query/fragment parts.
    """
    what = m.group('what')
    value = urlparse(m.group('value'))
    path = value.path
    origin = m.group('path')

    # XXX Put this in a different location.
    if what in {'filename', 'attach'}:
        if path.startswith('/'):
            path = path[1:]
        else:
            # relative to the source path of this content
            path = content.get_relative_source_path(
                os.path.join(content.relative_dir, path))

        if path not in content._context['filenames']:
            # Retry with '%20' decoded to a literal space before giving up.
            unquoted_path = path.replace('%20', ' ')
            if unquoted_path in content._context['filenames']:
                path = unquoted_path

        linked_content = content._context['filenames'].get(path)
        if linked_content:
            if what == 'attach':
                if isinstance(linked_content, Static):
                    linked_content.attach_to(content)
                else:
                    # FIX: message was corrupted to "Use (unknown) instead.";
                    # restore the actionable hint naming {filename} syntax.
                    logger.warning(
                        "%s used {attach} link syntax on a "
                        "non-static file. Use {filename} instead.",
                        content.get_relative_source_path())
            origin = '/'.join((siteurl, linked_content.url))
            origin = origin.replace('\\', '/')  # for Windows paths.
        else:
            logger.warning(
                "Unable to find `%s`, skipping url replacement.",
                value.geturl(), extra={
                    'limit_msg': ("Other resources were not found "
                                  "and their urls not replaced")})
    elif what == 'category':
        origin = '/'.join((siteurl, Category(path, content.settings).url))
    elif what == 'tag':
        origin = '/'.join((siteurl, Tag(path, content.settings).url))
    elif what == 'index':
        origin = '/'.join((siteurl, content.settings['INDEX_SAVE_AS']))
    elif what == 'author':
        origin = '/'.join((siteurl, Author(path, content.settings).url))
    else:
        logger.warning(
            "Replacement Indicator '%s' not recognized, "
            "skipping replacement",
            what)

    # keep all other parts, such as query, fragment, etc.
    parts = list(value)
    parts[2] = origin
    origin = urlunparse(parts)

    return ''.join(
        (m.group('markup'), m.group('quote'), origin, m.group('quote')))
def generate_context(self):
    """Add the articles into the shared context"""
    article_path = os.path.normpath(  # we have to remove trailing slashes
        os.path.join(self.path, self.settings['ARTICLE_DIR'])
    )
    all_articles = []
    for f in self.get_files(
            article_path, exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            signals.article_generate_preread.send(self)
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            # Best-effort: a broken source file is logged and skipped,
            # never fatal for the whole run.
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata:
            if (self.settings['USE_FOLDER_AS_CATEGORY']
                    and os.path.dirname(f) != article_path):
                # if the article is in a subdirectory
                category = os.path.basename(os.path.dirname(f))\
                    .decode('utf-8')
            else:
                # if the article is not in a subdirectory
                category = self.settings['DEFAULT_CATEGORY']

            if category != '':
                metadata['category'] = Category(category, self.settings)

        if 'date' not in metadata and self.settings.get('DEFAULT_DATE'):
            if self.settings['DEFAULT_DATE'] == 'fs':
                # 'fs' setting: take the date from the file's ctime.
                metadata['date'] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)
            else:
                # Otherwise DEFAULT_DATE is a datetime argument tuple.
                metadata['date'] = datetime.datetime(
                    *self.settings['DEFAULT_DATE'])

        signals.article_generate_context.send(self, metadata=metadata)
        article = Article(content, metadata, settings=self.settings,
                          filename=f, context=self.context)
        if not is_valid_content(article, f):
            continue

        self.add_filename(article)

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            # NOTE(review): unicode.encode is Python-2-only; assumes
            # article.status is a unicode object — confirm when porting.
            logger.warning(u"Unknown status %s for file %s, skipping it."
                           % (repr(unicode.encode(article.status, 'utf-8')),
                              repr(f)))

    # Split off translated variants; only canonical articles are listed.
    self.articles, self.translations = process_translations(all_articles)

    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)
        # ignore blank authors as well as undefined
        if hasattr(article, 'author') and article.author.name != '':
            self.authors[article.author].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['NEWEST_FIRST_ARCHIVES'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    # Keep only the most frequent tags, largest count first.
    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    # NOTE(review): `if tags:` on a map object is only meaningful on
    # Python 2 (where map returns a list); under Python 3 it is always
    # truthy and max() would consume the iterator — confirm when porting.
    tags = map(itemgetter(1), tag_cloud)
    if tags:
        max_count = max(tags)
    steps = self.settings.get('TAG_CLOUD_STEPS')

    # calculate word sizes
    self.tag_cloud = [
        (
            tag,
            # Bucket each tag into 1..steps on a log scale of its count;
            # `or 1` guards against log(1) == 0 in the denominator.
            int(math.floor(steps - (steps - 1) * math.log(count)
                           / (math.log(max_count)or 1)))
        )
        for tag, count in tag_cloud
    ]
    # put words in chaos
    random.shuffle(self.tag_cloud)

    # and generate the output :)

    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        key=lambda item: item[0].name,
        reverse=self.settings['REVERSE_CATEGORY_ORDER'])

    self.authors = list(self.authors.items())
    self.authors.sort(key=lambda item: item[0].name)

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud', 'authors', 'related_posts'))

    signals.article_generator_finalized.send(self)
# Tail of a multi-line re.compile(...) whose opening lies above this chunk:
# captures YAML front matter between a leading `---` fence and a closing
# `---` or `...` fence, with the remainder captured as 'content'.
    r"^---$"
    r"(?P<metadata>.+?)"
    r"^(?:---|\.\.\.)$"
    r"(?P<content>.*)",
    re.MULTILINE | re.DOTALL)

# Metadata keys whose duplication is forbidden, except tags/authors which
# legitimately accept multiple values.
DUPES_NOT_ALLOWED = \
    set(k for k, v in DUPLICATES_DEFINITIONS_ALLOWED.items() if not v) - \
    {"tags", "authors"}

_DEL = object()  # sentinel: "drop this metadata key from the result"

# Per-key normalizers: each takes (raw value, settings) and returns the
# parsed value, or the _DEL sentinel when the key should be discarded.
YAML_METADATA_PROCESSORS = {
    'tags': lambda x, y: [Tag(_strip(t), y) for t in _to_list(x)] or _DEL,
    'date': lambda x, y: _parse_date(x),
    'modified': lambda x, y: _parse_date(x),
    'category': lambda x, y: Category(_strip(x), y) if x else _DEL,
    'author': lambda x, y: Author(_strip(x), y) if x else _DEL,
    'authors': lambda x, y: [Author(_strip(a), y)
                             for a in _to_list(x)] or _DEL,
    'slug': lambda x, y: _strip(x) or _DEL,
    'save_as': lambda x, y: _strip(x) or _DEL,
    'status': lambda x, y: _strip(x) or _DEL,
}


def _strip(obj):
    # Coerce to str (None becomes '') and trim surrounding whitespace.
    return str(obj if obj is not None else '').strip()


def _to_list(obj):
    """Make object into a list"""