def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(
            os.path.join(self.path, self.settings["PAGE_DIR"]),
            exclude=self.settings["PAGE_EXCLUDES"]):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning("Could not process %s\n%s" % (f, str(e)))
            continue

        signals.pages_generate_context.send(self, metadata=metadata)
        page = Page(content, metadata, settings=self.settings,
                    source_path=f, context=self.context)
        if not is_valid_content(page, f):
            continue

        self.add_source_path(page)

        if page.status == "published":
            all_pages.append(page)
        elif page.status == "hidden":
            hidden_pages.append(page)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(page.status), repr(f)))

    self.pages, self.translations = process_translations(all_pages)
    self.hidden_pages, self.hidden_translations = \
        process_translations(hidden_pages)

    self._update_context(("pages",))
    self.context["PAGES"] = self.pages
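# The snippet above delegates translation handling to Pelican's
# process_translations helper. As a rough standalone sketch of the grouping
# it performs (an illustration, not Pelican's actual implementation): items
# sharing a slug are grouped, the default-language variant becomes the
# "main" item, and the rest are reported as translations. The names below
# (group_translations, default_lang) are hypothetical.
from collections import defaultdict

def group_translations(items, default_lang='en'):
    by_slug = defaultdict(list)
    for item in items:
        by_slug[item.slug].append(item)

    index, translations = [], []
    for variants in by_slug.values():
        # Prefer the default-language variant as the "main" item.
        main = next((v for v in variants if v.lang == default_lang),
                    variants[0])
        index.append(main)
        translations.extend(v for v in variants if v is not main)
    return index, translations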
def generate_vocabulary_context(self, vocabulary_file_name,
                                path_to_vocabulary):
    logger.debug("Generating vocabulary context for %s/%s",
                 path_to_vocabulary, vocabulary_file_name)
    voc = self.get_cached_data(vocabulary_file_name, None)
    if voc is None:
        try:
            voc = self.readers.read_file(
                base_path=path_to_vocabulary,
                path=vocabulary_file_name,
                content_class=Vocabulary,
                context=self.context,
                preread_signal=voc_generator_preread,
                preread_sender=self,
                context_signal=voc_generator_context,
                context_sender=self)
        except Exception as e:
            logger.error('Could not process %s\n%s',
                         vocabulary_file_name, e,
                         exc_info=self.settings.get('DEBUG', False))
            self._add_failed_source_path(vocabulary_file_name)
            return

        if not is_valid_content(voc, vocabulary_file_name):
            self._add_failed_source_path(vocabulary_file_name)
            return

    self.cache_data(vocabulary_file_name, voc)
    self.vocabularies.append(voc)
    self.add_source_path(voc)
def generate_vocabulary_context(self, vocabulary_file_name,
                                path_to_vocabulary):
    logger.debug("Generating vocabulary context for %s/%s",
                 path_to_vocabulary, vocabulary_file_name)
    voc = self.get_cached_data(vocabulary_file_name, None)
    if voc is None:
        try:
            voc = self.readers.read_file(
                base_path=path_to_vocabulary,
                path=vocabulary_file_name,
                content_class=Vocabulary,
                context=self.context,
                preread_signal=voc_generator_preread,
                preread_sender=self,
                context_signal=voc_generator_context,
                context_sender=self)
        except Exception as e:
            logger.error('Could not process %s\n%s',
                         vocabulary_file_name, e,
                         exc_info=self.settings.get('DEBUG', False))
            self._add_failed_source_path(vocabulary_file_name)
            return

        if not is_valid_content(voc, vocabulary_file_name):
            self._add_failed_source_path(vocabulary_file_name)
            return

    self.cache_data(vocabulary_file_name, voc)
    self.vocabularies.append(voc)
    self.add_source_path(voc)
def generate_context(self):
    all_recipes = []
    for f in self.get_files(
            self.settings.get('RECIPE_DIR', 'recipes'),
            exclude=self.settings.get('RECIPE_EXCLUDES', ())):
        try:
            recipe = self.readers.read_file(
                base_path=self.path, path=f, content_class=Recipe,
                context=self.context,
                preread_signal=signals.recipe_generator_preread,
                preread_sender=self,
                context_signal=signals.recipe_generator_context,
                context_sender=self)
        except Exception as e:
            logger.warning('Could not process {}\n{}'.format(f, e))
            continue

        if not is_valid_content(recipe, f):
            continue

        self.add_source_path(recipe)
        all_recipes.append(recipe)

    self.recipes = all_recipes
    self._update_context(('recipes',))
    self.context['RECIPES'] = self.recipes
    signals.recipe_generator_finalized.send(self)
def generate_context(self):
    all_sessions = []
    for f in self.get_files(
            os.path.join(self.path, self.settings['SESSION_DIR']),
            exclude=self.settings['SESSION_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
            split_and_strip(metadata, 'speakers')
            split_and_strip(metadata, 'bios')
        except Exception as e:
            logger.error(u'Could not process %s\n%s' % (f, unicode(e)))
            continue

        session = Session(content, metadata, settings=self.settings,
                          source_path=f, context=self.context)
        if not is_valid_content(session, f):
            continue

        self.add_source_path(session)

        if session.status == "published":
            if hasattr(session, 'tags'):
                for tag in session.tags:
                    conference.sessions.by_tag[tag].append(session)
            conference.add_session(session)
        elif session.status == "draft":
            self.drafts.append(session)
        else:
            logger.error(u"Unknown status %s for file %s, skipping it." %
                         (repr(unicode.encode(session.status, 'utf-8')),
                          repr(f)))
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(
            os.path.join(self.path, self.settings['PAGE_DIR']),
            exclude=self.settings['PAGE_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        signals.pages_generate_context.send(self, metadata=metadata)
        page = Page(content, metadata, settings=self.settings,
                    filename=f, context=self.context)
        if not is_valid_content(page, f):
            continue

        self.add_filename(page)

        if page.status == "published":
            all_pages.append(page)
        elif page.status == "hidden":
            hidden_pages.append(page)
        else:
            logger.warning(u"Unknown status %s for file %s, skipping it." %
                           (repr(unicode.encode(page.status, 'utf-8')),
                            repr(f)))

    self.pages, self.translations = process_translations(all_pages)
    self.hidden_pages, self.hidden_translations = \
        process_translations(hidden_pages)

    self._update_context(('pages',))
    self.context['PAGES'] = self.pages
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(
            os.path.join(self.path, self.settings['PAGE_DIR']),
            exclude=self.settings['PAGE_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        signals.pages_generate_context.send(self, metadata=metadata)
        page = Page(content, metadata, settings=self.settings,
                    filename=f, context=self.context)
        if not is_valid_content(page, f):
            continue

        self.add_filename(page)

        if page.status == "published":
            all_pages.append(page)
        elif page.status == "hidden":
            hidden_pages.append(page)
        else:
            logger.warning(u"Unknown status %s for file %s, skipping it." %
                           (repr(unicode.encode(page.status, 'utf-8')),
                            repr(f)))
def generate_context(self):
    """Add the articles into the shared context"""
    article_path = os.path.normpath(  # we have to remove trailing slashes
        os.path.join(self.path, self.settings['ARTICLE_DIR'])
    )
    all_articles = []
    for f in self.get_files(
            article_path,
            exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            signals.article_generate_preread.send(self)
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata:
            if (self.settings['USE_FOLDER_AS_CATEGORY']
                    and os.path.dirname(f) != article_path):
                # if the article is in a subdirectory
                category = os.path.basename(os.path.dirname(f)) \
                    .decode('utf-8')
            else:
                # if the article is not in a subdirectory
                category = self.settings['DEFAULT_CATEGORY']

            if category != '':
                metadata['category'] = Category(category, self.settings)

        if 'date' not in metadata and self.settings.get('DEFAULT_DATE'):
            if self.settings['DEFAULT_DATE'] == 'fs':
                metadata['date'] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)
            else:
                metadata['date'] = datetime.datetime(
                    *self.settings['DEFAULT_DATE'])

        signals.article_generate_context.send(self, metadata=metadata)
        article = Article(content, metadata, settings=self.settings,
                          filename=f, context=self.context)
        if not is_valid_content(article, f):
            continue

        self.add_filename(article)

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            logger.warning(u"Unknown status %s for file %s, skipping it." %
                           (repr(unicode.encode(article.status, 'utf-8')),
                            repr(f)))
def generate_context(self):
    """
    Here is the meat of the class - where the heavy lifting occurs.

    It generates a list of tutorials and places them in the context
    object so we can access them in templates. Some of this is leftover
    from the stock Article class. Ideally those aspects will be removed
    once it is shown they can safely be done away with. However, this
    works.
    """
    all_tutorials = []
    hidden_tutorials = []
    for f in self.get_files(
            self.settings['TUTORIAL_DIR'],
            exclude=self.settings['TUTORIAL_EXCLUDES']):
        tutorial = self.get_cached_data(f, None)
        if tutorial is None:
            try:
                tutorial = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Tutorial,
                    context=self.context,
                    preread_signal=tutorial_generator_preread,
                    preread_sender=self,
                    context_signal=tutorial_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(tutorial, f):
                continue

            self.cache_data(f, tutorial)

        self.add_source_path(tutorial)

        if tutorial.status == "published":
            all_tutorials.append(tutorial)
            for author_data in self.context['site_authors']:
                if author_data.name == tutorial.author.name:
                    tutorial.author.data = author_data
        elif tutorial.status == "hidden":
            hidden_tutorials.append(tutorial)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(tutorial.status), repr(f)))

    self.tutorials, self.translations = process_translations(all_tutorials)
    self.hidden_tutorials, self.hidden_translations = (
        process_translations(hidden_tutorials))

    self._update_context(('tutorials',))
    self.context['TUTORIALS'] = self.tutorials

    self.save_cache()
    self.readers.save_cache()
    tutorial_generator_finalized.send(self)
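# Several generators in this family use the same read-through cache the
# snippet above shows: look the parsed object up by source path, parse only
# on a miss, then store the result so unchanged files are skipped on the
# next build. A minimal dict-backed sketch of that pattern; the names here
# are illustrative, and Pelican's real cacher additionally keys on file
# state and persists the cache between runs.
class SimpleDataCacher(object):
    def __init__(self):
        self._cache = {}

    def get_cached_data(self, filename, default=None):
        return self._cache.get(filename, default)

    def cache_data(self, filename, data):
        self._cache[filename] = data

# Usage mirroring the loop above (parse_tutorial is a hypothetical
# stand-in for self.readers.read_file):
#     tutorial = cacher.get_cached_data(f, None)
#     if tutorial is None:
#         tutorial = parse_tutorial(f)
#         cacher.cache_data(f, tutorial)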
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata.keys():
            if os.path.dirname(f) == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            else:
                category = os.path.basename(
                    os.path.dirname(f)).decode('utf-8')

            if category != '':
                metadata['category'] = unicode(category)

        if 'date' not in metadata.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadata, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        add_to_url = u''
        if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
            article_permalink_structure = \
                self.settings['ARTICLE_PERMALINK_STRUCTURE']
            article_permalink_structure = article_permalink_structure \
                .lstrip('/').replace('%(', "%%(")

            # try to substitute any python datetime directive
            add_to_url = article.date.strftime(article_permalink_structure)
            # try to substitute any article metadata in rest file
            add_to_url = add_to_url % article.__dict__
            add_to_url = [slugify(i) for i in add_to_url.split('/')]
            add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        elif article.status == "noindex":
            self.noindex.append(article)
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(self.settings['PAGE_PATHS'],
                            exclude=self.settings['PAGE_EXCLUDES']):
        page = self.get_cached_data(f, None)
        if page is None:
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Page,
                    context=self.context,
                    preread_signal=signals.page_generator_preread,
                    preread_sender=self,
                    context_signal=signals.page_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.error('Could not process %s\n%s', f, e,
                             exc_info=self.settings.get('DEBUG', False))
                self._add_failed_source_path(f)
                continue

            if not is_valid_content(page, f):
                self._add_failed_source_path(f)
                continue

        if page.status.lower() == "published":
            all_pages.append(page)
        elif page.status.lower() == "hidden":
            hidden_pages.append(page)
        else:
            logger.error("Unknown status '%s' for file %s, skipping it.",
                         page.status, f)
            self._add_failed_source_path(f)
            continue

        self.cache_data(f, page)
        self.add_source_path(page)

    self.pages, self.translations = process_translations(
        all_pages, order_by=self.settings['PAGE_ORDER_BY'])
    self.hidden_pages, self.hidden_translations = (
        process_translations(hidden_pages))

    self._update_context(('pages',))
    self.context['PAGES'] = self.pages

    self.save_cache()
    self.readers.save_cache()
    signals.page_generator_finalized.send(self)
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        content, metadatas = read_file(f)

        # if no category is set, use the name of the path as a category
        if 'category' not in metadatas.keys():
            category = os.path.basename(os.path.dirname(f))
            if category == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            if category != '':
                metadatas['category'] = unicode(category)

        if 'date' not in metadatas.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadatas, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        all_articles.append(article)

    self.articles, self.translations = process_translations(all_articles)
    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['REVERSE_ARCHIVE_ORDER'])

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))

    self._update_context(('articles', 'dates', 'tags', 'categories'))
def generate_context(self):
    """
    Here is the meat of the class - where the heavy lifting occurs.

    It generates a list of events and places them in the context object
    so we can access them in templates. Some of this is leftover from the
    stock Article class. Ideally those aspects will be removed once it is
    shown they can safely be done away with. However, this works.
    """
    all_events = []
    hidden_events = []
    for f in self.get_files(self.settings["EVENT_DIR"],
                            exclude=self.settings["EVENT_EXCLUDES"]):
        event = self.get_cached_data(f, None)
        if event is None:
            try:
                event = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Event,
                    context=self.context,
                    preread_signal=event_generator_preread,
                    preread_sender=self,
                    context_signal=event_generator_context,
                    context_sender=self,
                )
            except Exception as e:
                logger.warning("Could not process {}\n{}".format(f, e))
                continue

            if not is_valid_content(event, f):
                continue

            self.cache_data(f, event)

        self.add_source_path(event)

        if event.status == "published":
            all_events.append(event)
        elif event.status == "hidden":
            hidden_events.append(event)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(event.status), repr(f)))

    self.events, self.translations = process_translations(all_events)
    self.hidden_events, self.hidden_translations = \
        process_translations(hidden_events)

    self._update_context(("events",))
    self.context["EVENTS"] = self.events

    self.save_cache()
    self.readers.save_cache()
    event_generator_finalized.send(self)
def generate_context(self):
    all_pages = []
    for f in self.get_files(os.sep.join((self.path, 'pages'))):
        try:
            content, metadata = read_file(f)
        except Exception as e:
            error(u'Could not process %s\n%s' % (f, str(e)))
            continue

        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue

        all_pages.append(page)
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(
            self.settings['PAGE_PATHS'],
            exclude=self.settings['PAGE_EXCLUDES']):
        page = self.get_cached_data(f, None)
        if page is None:
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Page,
                    context=self.context,
                    preread_signal=signals.page_generator_preread,
                    preread_sender=self,
                    context_signal=signals.page_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.error('Could not process %s\n%s', f, e,
                             exc_info=self.settings.get('DEBUG', False))
                self._add_failed_source_path(f)
                continue

            if not is_valid_content(page, f):
                self._add_failed_source_path(f)
                continue

        if page.status.lower() == "published":
            all_pages.append(page)
        elif page.status.lower() == "hidden":
            hidden_pages.append(page)
        else:
            logger.error("Unknown status '%s' for file %s, skipping it.",
                         page.status, f)
            self._add_failed_source_path(f)
            continue

        self.cache_data(f, page)
        self.add_source_path(page)

    self.pages, self.translations = process_translations(
        all_pages, order_by=self.settings['PAGE_ORDER_BY'])
    self.hidden_pages, self.hidden_translations = \
        process_translations(hidden_pages)

    self._update_context(('pages', 'hidden_pages'))

    self.save_cache()
    self.readers.save_cache()
    signals.page_generator_finalized.send(self)
def find_albums(self, path=(), parent=None):
    album_path = os.path.join(self.path, self.settings['ALBUM_PATH'],
                              *path)
    location = '/'.join(path)
    album = AlbumContent(location, self.settings)
    if parent:
        parent.albums.append(album)

    # Images don't have titles, use the basename instead.
    image_settings = dict(self.settings, SLUGIFY_SOURCE='basename')

    for filename in os.listdir(album_path):
        f = os.path.join(album_path, filename)
        if os.path.isdir(f):
            self.find_albums(path + (filename,), album)
        else:
            try:
                PILImage.open(f)
                image = album.add_image(filename)
                page = Image('', settings=image_settings,
                             source_path=filename)
                page.image = image
                image.page = page
                self.add_source_path(page)
                self.image_pages.append(page)
            except IOError:
                try:
                    page = self.readers.read_file(
                        base_path=self.path, path=f,
                        content_class=Album, context=self.context,
                    )
                    self.add_source_path(page)
                except Exception as e:
                    logger.error('Could not process %s\n%s', f, e,
                                 exc_info=self.settings.get('DEBUG',
                                                            False))
                    self._add_failed_source_path(f)
                    continue

                if not is_valid_content(page, f):
                    self._add_failed_source_path(f)
                    continue

                page.album = album
                self.album_pages.append(page)
                album.pages.append(page)
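# The album walker above distinguishes images from text sources by just
# trying Pillow: PILImage.open raises IOError/OSError (newer Pillow raises
# UnidentifiedImageError, an OSError subclass) for anything it cannot
# decode, and the except branch falls back to reading the file as an Album
# page. A standalone sketch of the same probe:
from PIL import Image

def is_image(path):
    try:
        with Image.open(path) as img:
            img.verify()  # cheap integrity check, no full decode
        return True
    except (IOError, OSError):
        return False

# is_image('photo.jpg') -> True for a decodable image
# is_image('index.rst') -> False, so it would be read as page content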
def generate_context(self):
    all_pages = []
    for f in self.get_files(os.sep.join((self.path, 'pages'))):
        content, metadatas = read_file(f)
        page = Page(content, metadatas, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue
        all_pages.append(page)

    self.pages, self.translations = process_translations(all_pages)
    self._update_context(('pages',))
    self.context['PAGES'] = self.pages
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        content, metadatas = read_file(f)

        # if no category is set, use the name of the path as a category
        if 'category' not in metadatas.keys():
            category = os.path.basename(os.path.dirname(f))
            if category == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            if category != '':
                metadatas['category'] = unicode(category)

        if 'date' not in metadatas.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadatas, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        all_articles.append(article)

    self.articles, self.translations = process_translations(all_articles)
    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['REVERSE_ARCHIVE_ORDER'])

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))

    self._update_context(('articles', 'dates', 'tags', 'categories'))
def generate_context(self):
    all_pages = []
    for f in self.get_files(
            os.path.join(self.path, self.settings['PAGE_DIR']),
            exclude=self.settings['PAGE_EXCLUDES']):
        try:
            content, metadata = read_file(f)
        except Exception as e:
            logger.error(u'Could not process %s\n%s' % (f, str(e)))
            continue

        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue

        all_pages.append(page)
def generate_context(self):
    all_pages = []
    for f in self.get_files(os.sep.join((self.path, 'pages'))):
        content, metadata = read_file(f)
        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue
        all_pages.append(page)

    self.pages, self.translations = process_translations(all_pages)
    self._update_context(('pages',))
    self.context['PAGES'] = self.pages
def generate_context(self):
    all_pages = []
    for f in self.get_files(
            os.path.join(self.path, self.settings["PAGE_DIR"]),
            exclude=self.settings["PAGE_EXCLUDES"]):
        try:
            content, metadata = read_file(f)
        except Exception as e:
            logger.error(u"Could not process %s\n%s" % (f, str(e)))
            continue

        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue

        all_pages.append(page)
def generate_context(self):
    """change the context"""
    article_path = os.path.normpath(  # we have to remove trailing slashes
        os.path.join(self.path, self.settings["ARTICLE_DIR"])
    )
    all_articles = []
    for f in self.get_files(article_path,
                            exclude=self.settings["ARTICLE_EXCLUDES"]):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning(u"Could not process %s\n%s" % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if "category" not in metadata:
            if os.path.dirname(f) == article_path:
                # if the article is not in a subdirectory
                category = self.settings["DEFAULT_CATEGORY"]
            else:
                category = os.path.basename(
                    os.path.dirname(f)).decode("utf-8")

            if category != "":
                metadata["category"] = Category(category, self.settings)

        if "date" not in metadata and self.settings["DEFAULT_DATE"]:
            if self.settings["DEFAULT_DATE"] == "fs":
                metadata["date"] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)
            else:
                metadata["date"] = datetime.datetime(
                    *self.settings["DEFAULT_DATE"])

        signals.article_generate_context.send(self, metadata=metadata)
        article = Article(content, metadata, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if article.status == "published":
            if hasattr(article, "tags"):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            logger.warning(
                u"Unknown status %s for file %s, skipping it." %
                (repr(unicode.encode(article.status, "utf-8")), repr(f))
            )
def generate_context(self):
    def b_path(left, right):
        return os.path.join(left, os.path.basename(right))

    def db_path(left, right):
        return os.path.join(left,
                            os.path.basename(os.path.dirname(right)),
                            os.path.basename(right))

    for docpath in self.settings['DOCS_PATH']:
        abspath = os.path.join(self.path, docpath, '*.html')
        for op_abspath in glob.glob(abspath):
            op_relpath = b_path(docpath, op_abspath)
            if not os.path.isfile(op_abspath):
                continue

            page = self._doc_read_file(op_relpath, True)
            self.doc_html.append(DocumentationContainer(page))

            if not os.path.isdir(op_abspath[:-5]):
                continue

            mp_abspath = os.path.join(op_abspath[:-5], '*.html')
            for mp_html_abspath in glob.glob(mp_abspath):
                mp_html_relpath = db_path(docpath, mp_html_abspath)
                if not os.path.isfile(mp_html_abspath):
                    continue
                page = self._doc_read_file(mp_html_relpath, False)
                self.doc_html[-1].add_mpage(page)

    for docpath in self.settings['DOCS_PATH']:
        abspath = os.path.join(self.path, docpath, '*.rst')
        for op_abspath in glob.glob(abspath):
            op_relpath = b_path(docpath, op_abspath)
            if not os.path.isfile(op_abspath):
                continue

            page = None
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=op_relpath,
                    content_class=Documentation, context=self.context)
            except Exception as e:
                logger.error('Could not process %s\n%s', op_relpath, e,
                             exc_info=self.settings.get('DEBUG', False))
                continue

            if not is_valid_content(page, op_relpath):
                continue

            if page:
                self.doc_rst.append(DocumentationContainer(page))
def find_albums(self, path=(), parent=None):
    album_path = os.path.join(self.path, self.settings['ALBUM_PATH'],
                              *path)
    location = '/'.join(path)
    album = AlbumContent(location, self.settings)
    if parent:
        parent.albums.append(album)

    # Images don't have titles, use the basename instead.
    image_settings = dict(self.settings, SLUGIFY_SOURCE='basename')

    for filename in os.listdir(album_path):
        f = os.path.join(album_path, filename)
        if os.path.isdir(f):
            self.find_albums(path + (filename,), album)
        else:
            try:
                PILImage.open(f)
                image = album.add_image(filename)
                page = Image('', settings=image_settings,
                             source_path=filename)
                page.image = image
                image.page = page
                self.add_source_path(page)
                self.image_pages.append(page)
            except IOError:
                try:
                    page = self.readers.read_file(
                        base_path=self.path, path=f,
                        content_class=Album, context=self.context,
                    )
                    self.add_source_path(page)
                except Exception as e:
                    logger.error('Could not process %s\n%s', f, e,
                                 exc_info=self.settings.get('DEBUG',
                                                            False))
                    self._add_failed_source_path(f)
                    continue

                if not is_valid_content(page, f):
                    self._add_failed_source_path(f)
                    continue

                page.album = album
                self.album_pages.append(page)
                album.pages.append(page)
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(self.settings['PAGE_DIR'],
                            exclude=self.settings['PAGE_EXCLUDES']):
        page = self.get_cached_data(f, None)
        if page is None:
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Page,
                    context=self.context,
                    preread_signal=signals.page_generator_preread,
                    preread_sender=self,
                    context_signal=signals.page_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(page, f):
                continue

            self.cache_data(f, page)

        self.add_source_path(page)

        if page.status == "published":
            all_pages.append(page)
        elif page.status == "hidden":
            hidden_pages.append(page)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(page.status), repr(f)))

    self.pages, self.translations = process_translations(all_pages)
    self.hidden_pages, self.hidden_translations = (
        process_translations(hidden_pages))

    self._update_context(('pages',))
    self.context['PAGES'] = self.pages

    self.save_cache()
    self.readers.save_cache()
    signals.page_generator_finalized.send(self)
def generate_context(self):
    for author_file in self.get_files(
            self.settings.get("AUTHOR_DIR", "authors"),
            exclude=self.settings.get("AUTHOR_EXCLUDES", "")):
        try:
            author = self.readers.read_file(
                base_path=self.path, path=author_file,
                content_class=AuthorBiography, context=self.context)
        except Exception as e:
            logger.warning("Could not process author {0}\n{1}".format(
                author_file, e))
            continue

        if is_valid_content(author, author_file):
            self.authors_info.add(author)

    self._update_context(("authors_info",))
    self.context["authors_info"] = self.authors_info
def generate_context(self):
    all_pages = []
    hidden_pages = []
    for f in self.get_files(
            self.settings['PAGE_DIR'],
            exclude=self.settings['PAGE_EXCLUDES']):
        page = self.get_cached_data(f, None)
        if page is None:
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Page,
                    context=self.context,
                    preread_signal=signals.page_generator_preread,
                    preread_sender=self,
                    context_signal=signals.page_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(page, f):
                continue

            self.cache_data(f, page)

        self.add_source_path(page)

        if page.status == "published":
            all_pages.append(page)
        elif page.status == "hidden":
            hidden_pages.append(page)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(page.status), repr(f)))

    self.pages, self.translations = process_translations(all_pages)
    self.hidden_pages, self.hidden_translations = (
        process_translations(hidden_pages))

    self._update_context(('pages',))
    self.context['PAGES'] = self.pages

    self.save_cache()
    self.readers.save_cache()
    signals.page_generator_finalized.send(self)
def generate_context(self):
    def b_path(left, right):
        return os.path.join(left, os.path.basename(right))

    def db_path(left, right):
        return os.path.join(left,
                            os.path.basename(os.path.dirname(right)),
                            os.path.basename(right))

    for docpath in self.settings['DOCS_PATH']:
        abspath = os.path.join(self.path, docpath, '*.html')
        for op_abspath in glob.glob(abspath):
            op_relpath = b_path(docpath, op_abspath)
            if not os.path.isfile(op_abspath):
                continue

            page = self._doc_read_file(op_relpath, True)
            self.doc_html.append(DocumentationContainer(page))

            if not os.path.isdir(op_abspath[:-5]):
                continue

            mp_abspath = os.path.join(op_abspath[:-5], '*.html')
            for mp_html_abspath in glob.glob(mp_abspath):
                mp_html_relpath = db_path(docpath, mp_html_abspath)
                if not os.path.isfile(mp_html_abspath):
                    continue
                page = self._doc_read_file(mp_html_relpath, False)
                self.doc_html[-1].add_mpage(page)

    for docpath in self.settings['DOCS_PATH']:
        abspath = os.path.join(self.path, docpath, '*.rst')
        for op_abspath in glob.glob(abspath):
            op_relpath = b_path(docpath, op_abspath)
            if not os.path.isfile(op_abspath):
                continue

            page = None
            try:
                page = self.readers.read_file(
                    base_path=self.path, path=op_relpath,
                    content_class=Documentation, context=self.context)
            except Exception as e:
                logger.error('Could not process %s\n%s', op_relpath, e,
                             exc_info=self.settings.get('DEBUG', False))
                continue

            if not is_valid_content(page, op_relpath):
                continue

            if page:
                self.doc_rst.append(DocumentationContainer(page))
def generate_context(self):
    for f in self.get_files(
            os.path.join(self.path, self.settings['BIO_DIR']),
            exclude=self.settings['BIO_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
            split_and_strip(metadata, 'roles')
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        bio = Bio(content, metadata, settings=self.settings,
                  source_path=f, context=self.context)
        if not is_valid_content(bio, f):
            continue

        self.add_source_path(bio)
        conference.add_bio(bio)
def generate_context(self):
    # Only use YamlReader, don't ignore anything, but only read files
    # with the yml extension.
    self._patch_readers()

    # Custom settings for self.get_files
    saved_ignore_files = self.settings['IGNORE_FILES']
    self.settings['IGNORE_FILES'] = YAML_GENERATOR_IGNORE_FILES
    paths = self.settings.get('YAML_PATHS', self.settings['PAGE_PATHS'])
    filelist = self.get_files(paths, exclude=[], extensions=('yml',))
    self.settings['IGNORE_FILES'] = saved_ignore_files

    for f in filelist:
        logger.debug('YamlGenerator - read %s' % f)

        # Define content class for _navmenu_.yml files.
        # YamlReader will also process these specially.
        content_class = DocMeta
        if os.path.basename(f) == '_navmenu_.yml':
            content_class = NavMenu

        doc_meta = self.get_cached_data(f, None)
        if doc_meta is None:
            try:
                doc_meta = self.readers.read_file(
                    base_path=self.path, path=f,
                    content_class=content_class, context=self.context)
            except Exception as e:
                logger.error('Could not process %s\n%s', f, e,
                             exc_info=self.settings.get('DEBUG', False))
                self._add_failed_source_path(f)
                continue

            if not is_valid_content(doc_meta, f):
                self._add_failed_source_path(f)
                continue

            self.cache_data(f, doc_meta)

        self.add_source_path(doc_meta)
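# Note the extensions=('yml',) tuple in the snippet above: in Python,
# parentheses alone do not make a tuple, so ('yml') is just the string
# 'yml', and code iterating over the extensions would see the characters
# 'y', 'm', 'l' rather than one extension. A quick demonstration:
not_a_tuple = ('yml')   # this is the str 'yml'
a_tuple = ('yml',)      # the trailing comma makes a 1-tuple

print(type(not_a_tuple).__name__)   # str
print(type(a_tuple).__name__)       # tuple
print(list(not_a_tuple))            # ['y', 'm', 'l']
print(list(a_tuple))                # ['yml']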
def generate_context(self):
    """change the context"""
    article_path = os.path.join(self.path, self.settings['ARTICLE_DIR'])
    all_articles = []
    for f in self.get_files(
            article_path,
            exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            logger.warning(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata:
            if os.path.dirname(f) == article_path:
                category = self.settings['DEFAULT_CATEGORY']
            else:
                category = os.path.basename(
                    os.path.dirname(f)).decode('utf-8')

            if category != '':
                metadata['category'] = Category(category, self.settings)

        if 'date' not in metadata and self.settings['FALLBACK_ON_FS_DATE']:
            metadata['date'] = datetime.datetime.fromtimestamp(
                os.stat(f).st_ctime)

        article = Article(content, metadata, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
def generate_context(self):
    for speaker_file in self.get_files(
            self.settings.get('SPEAKER_DIR', 'speakers'),
            exclude=self.settings.get('SPEAKER_EXCLUDES', '')):
        try:
            speaker = self.readers.read_file(
                base_path=self.path, path=speaker_file,
                content_class=Speaker, context=self.context)
        except Exception as e:
            logger.warning("Could not process speaker {0}\n{1}".format(
                speaker_file, e))
            continue

        if is_valid_content(speaker, speaker_file):
            self.speaker_manager.add(speaker)
        else:
            logger.debug("A speaker file is NOT VALID content: "
                         "{0},{1}".format(speaker, speaker_file))

    self._update_context(('speaker_manager',))
    self.context['speaker_manager'] = self.speaker_manager
def generate_context(self):
    all_pages = []
    for f in self.get_files(os.sep.join((self.path, 'pages'))):
        content, metadata = read_file(f)
        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue

        if self.settings.get('CLEAN_URLS_NO_PROXY'):
            # cleaning page url
            page.save_as = os.path.join(page.slug, 'index.html')
            page.url = os.path.dirname(page.save_as) + '/'

        all_pages.append(page)

    self.pages, self.translations = process_translations(all_pages)
    self._update_context(('pages',))
    self.context['PAGES'] = self.pages
def generate_context(self):
    for author_file in self.get_files(
            self.settings.get('AUTHOR_DIR', 'authors'),
            exclude=self.settings.get('AUTHOR_EXCLUDES', '')):
        try:
            author = self.readers.read_file(
                base_path=self.path, path=author_file,
                content_class=AuthorBiography, context=self.context)
        except Exception as e:
            logger.warning("Could not process author {0}\n{1}".format(
                author_file, e))
            continue

        if is_valid_content(author, author_file):
            self.authors_info.add(author)

    self._update_context(('authors_info',))
    self.context['authors_info'] = self.authors_info
def generate_context(self):
    for talk_file in self.get_files(
            self.settings.get('TALKS_DIR', 'talks'),
            exclude=self.settings.get('TALKS_EXCLUDES', '')):
        try:
            talk = self.readers.read_file(
                base_path=self.path, path=talk_file,
                content_class=Talk, context=self.context)
        except Exception as e:
            logger.warning("Could not process talk {0}\n{1}".format(
                talk_file, e))
            continue

        if is_valid_content(talk, talk_file):
            self.talk_manager.add(talk)
        else:
            logger.debug("A talk file is NOT VALID content: "
                         "{0},{1}".format(talk, talk_file))

    self._update_context(('talk_manager',))
    self.context['talk_manager'] = self.talk_manager
def generate_context(self):
    all_pages = []
    pagecat_map = dict(self.settings.get('PAGECAT_MAP'))
    for f in self.get_files(
            os.path.join(self.path, self.settings['PAGE_DIR']),
            exclude=self.settings['PAGE_EXCLUDES']):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception as e:
            print(u'Could not process %s\n%s' % (f, str(e)))
            continue

        # if no sorting is set, set default to 99
        if 'sorting' not in metadata.keys():
            metadata['sorting'] = 99

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata.keys():
            if os.path.dirname(f) == self.path:
                category = 'NO_CATEGORY'
            else:
                category = os.path.basename(
                    os.path.dirname(f)).decode('utf-8')

            if category != '':
                title = pagecat_map[category] \
                    if category in pagecat_map else None
                metadata['category'] = PagesCategory(
                    category, self.settings, title)

        page = Page(content, metadata, settings=self.settings, filename=f)
        if not is_valid_content(page, f):
            continue

        # all pages whose status is not "published" will be ignored
        if page.status == "published":
            all_pages.append(page)
        else:
            self.ignored_pages.append(page)
def generate_context(self):
    """Add the articles into the shared context."""
    logger.debug("SlokaGenerator: Generate context")
    self.articles = []

    # TODO handle cases where settings are not set
    for f in self.get_files(
            self.settings['SLOKA_DIR'],
            exclude=self.settings['SLOKA_EXCLUDES']):
        try:
            # TODO index files
            # if f.endswith("info.json"):
            #     continue
            sloka = self.readers.read_file(
                base_path=self.path, path=f, content_class=Sloka,
                context=self.context,
                preread_signal=prajna.signals.sloka_generator_preread,
                preread_sender=self,
                context_signal=prajna.signals.sloka_generator_context,
                context_sender=self)
            logger.debug("SlokaGenerator: file: {0}, content: {1}"
                         .format(sloka.source_path, sloka.content))
        except Exception as e:
            logger.warning('SlokaGenerator: Could not process {}'
                           .format(f))
            logger.exception(e)
            continue

        if not is_valid_content(sloka, f):
            continue

        self.articles.append(sloka)

    # TODO sort articles by filename
    # TODO organize articles by chapters
    # TODO link transliterations to original article
    prajna.signals.sloka_generator_finalized.send(self)
def generate_context(self):
    for speaker_file in self.get_files(
            self.settings.get('SPEAKER_DIR', 'speakers'),
            exclude=self.settings.get('SPEAKER_EXCLUDES', '')):
        try:
            speaker = self.readers.read_file(
                base_path=self.path, path=speaker_file,
                content_class=Speaker, context=self.context)
        except Exception as e:
            logger.warning("Could not process speaker {0}\n{1}".format(
                speaker_file, e))
            continue

        if is_valid_content(speaker, speaker_file):
            self.speaker_manager.add(speaker)
        else:
            logger.debug("A speaker file is NOT VALID content: "
                         "{0},{1}".format(speaker, speaker_file))

    self._update_context(('speaker_manager',))
    self.context['speaker_manager'] = self.speaker_manager
def generate_context(self):
    candidates_path = os.path.normpath(
        os.path.join(self.path, 'candidates')
    )
    for f in self.get_files(candidates_path):
        try:
            content, metadata = read_file(f, settings=self.settings)
        except Exception:
            continue

        # Position is determined by directory
        position = os.path.basename(os.path.dirname(f)).decode('utf-8')
        metadata['position'] = position

        candidate = Candidate(content, metadata, settings=self.settings,
                              filename=f)
        if not is_valid_content(candidate, f):
            continue

        if position not in self.candidates:
            self.candidates[position] = []
        self.candidates[position].append(candidate)
def generate_context(self):
    for talk_file in self.get_files(
            self.settings.get('TALKS_DIR', 'talks'),
            exclude=self.settings.get('TALKS_EXCLUDES', '')):
        try:
            talk = self.readers.read_file(
                base_path=self.path, path=talk_file,
                content_class=Talk, context=self.context)
        except Exception as e:
            logger.warning("Could not process talk {0}\n{1}".format(
                talk_file, e))
            continue

        if is_valid_content(talk, talk_file):
            self.talk_manager.add(talk)
        else:
            logger.debug("A talk file is NOT VALID content: "
                         "{0},{1}".format(talk, talk_file))

    self._update_context(('talk_manager',))
    self.context['talk_manager'] = self.talk_manager
def generate_context(self):
    """Add the articles into the shared context"""
    all_articles = []
    for f in self.get_files(
            self.settings['ARTICLE_DIR'],
            exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            article = self.readers.read_file(
                base_path=self.path, path=f, content_class=Article,
                context=self.context,
                preread_signal=signals.article_generator_preread,
                preread_sender=self,
                context_signal=signals.article_generator_context,
                context_sender=self)
        except Exception as e:
            logger.warning('Could not process {}\n{}'.format(f, e))
            continue

        if not is_valid_content(article, f):
            continue

        self.add_source_path(article)

        if article.status == "published":
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(article.status), repr(f)))

    self.articles, self.translations = process_translations(all_articles)

    for article in self.articles:
        # only main articles are listed in categories and tags,
        # not translations
        self.categories[article.category].append(article)
        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        # ignore blank authors as well as undefined
        if hasattr(article, 'author') and article.author.name != '':
            self.authors[article.author].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['NEWEST_FIRST_ARCHIVES'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = list(map(itemgetter(1), tag_cloud))
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings['REVERSE_CATEGORY_ORDER'])

    self.authors = list(self.authors.items())
    self.authors.sort()

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud', 'authors', 'related_posts'))
    signals.article_generator_finalized.send(self)
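# The word-size expression above maps tag frequencies onto `steps`
# discrete sizes on a logarithmic scale, where size 1 is the most frequent
# tag and `steps` the least; the `or 1` guards against dividing by
# log(max_count) == 0 when every tag appears exactly once. A worked
# example with hypothetical counts:
import math

def tag_size(count, max_count, steps=4):
    return int(math.floor(steps - (steps - 1) * math.log(count)
                          / (math.log(max_count) or 1)))

for count in (8, 4, 2, 1):
    print(count, tag_size(count, max_count=8))
# prints 8->1, 4->2, 2->3, 1->4: counts spread evenly across the steps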
def generate_context(self):
    """Add the articles into the shared context"""
    all_articles = []
    for f in self.get_files(self.settings['ARTICLE_DIR'],
                            exclude=self.settings['ARTICLE_EXCLUDES']):
        try:
            article = read_file(
                base_path=self.path, path=f, content_class=Article,
                settings=self.settings, context=self.context,
                preread_signal=signals.article_generator_preread,
                preread_sender=self,
                context_signal=signals.article_generator_context,
                context_sender=self)
        except Exception as e:
            logger.warning('Could not process {}\n{}'.format(f, e))
            continue

        if not is_valid_content(article, f):
            continue

        self.add_source_path(article)

        if article.status == "published":
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)
        else:
            logger.warning("Unknown status %s for file %s, skipping it." %
                           (repr(article.status), repr(f)))

    self.articles, self.translations = process_translations(all_articles)

    for article in self.articles:
        # only main articles are listed in categories and tags,
        # not translations
        self.categories[article.category].append(article)
        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        # ignore blank authors as well as undefined
        if hasattr(article, 'author') and article.author.name != '':
            self.authors[article.author].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['NEWEST_FIRST_ARCHIVES'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = list(map(itemgetter(1), tag_cloud))
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings['REVERSE_CATEGORY_ORDER'])

    self.authors = list(self.authors.items())
    self.authors.sort()

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud', 'authors', 'related_posts'))
    signals.article_generator_finalized.send(self)
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        content, metadata = read_file(f)

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata.keys():
            if os.path.dirname(f) == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            else:
                category = os.path.basename(os.path.dirname(f))

            if category != '':
                metadata['category'] = unicode(category)

        if 'date' not in metadata.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadata, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        add_to_url = u''
        if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
            article_permalink_structure = self.settings[
                'ARTICLE_PERMALINK_STRUCTURE']
            article_permalink_structure = \
                article_permalink_structure.lstrip('/')

            # try to substitute any python datetime directive
            add_to_url = article.date.strftime(article_permalink_structure)
            # try to substitute any article metadata in rest file
            add_to_url = add_to_url % article.__dict__
            add_to_url = [slugify(i) for i in add_to_url.split('/')]
            add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)

    self.articles, self.translations = process_translations(all_articles)
    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)
        self.authors[article.author].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['REVERSE_ARCHIVE_ORDER'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = map(itemgetter(1), tag_cloud)
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))

    self.authors = list(self.authors.items())
    self.authors.sort()

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud', 'authors'))
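# The permalink expansion above happens in three passes: strftime fills the
# date directives, %-formatting fills metadata placeholders from the
# article's __dict__ (other variants in this family first escape '%(' as
# '%%(' so strftime leaves those placeholders alone), and each path segment
# is slugified. A standalone sketch with a hypothetical structure and
# metadata dict, and a simplified stand-in for slugify():
import os.path
from datetime import datetime

def expand_permalink(structure, date, metadata):
    structure = structure.lstrip('/').replace('%(', '%%(')
    expanded = date.strftime(structure)  # pass 1: date directives
    expanded = expanded % metadata       # pass 2: metadata placeholders
    segments = [seg.lower().replace(' ', '-')  # pass 3: naive slugify
                for seg in expanded.split('/')]
    return os.path.join(*segments)

print(expand_permalink('/%Y/%m/%(slug)s', datetime(2013, 4, 2),
                       {'slug': 'hello-world'}))
# -> 2013/04/hello-world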
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        content, metadatas = read_file(f)

        # if no category is set, use the name of the path as a category
        if 'category' not in metadatas.keys():
            if os.path.dirname(f) == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            else:
                category = os.path.basename(os.path.dirname(f))

            if category != '':
                metadatas['category'] = unicode(category)

        if 'date' not in metadatas.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadatas, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        all_articles.append(article)

    self.articles, self.translations = process_translations(all_articles)
    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['REVERSE_ARCHIVE_ORDER'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = map(itemgetter(1), tag_cloud)
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes; guard against log(1) == 0 when every tag
        # appears exactly once
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    self._update_context(
        ('articles', 'dates', 'tags', 'categories', 'tag_cloud'))
def generate_context(self):
    """Add the entities into the shared context"""
    all_entities = []
    all_drafts = []
    files = self.get_files(self.settings['PATHS'],
                           exclude=self.settings['EXCLUDES'])
    for f in files:
        entity_or_draft = self.get_cached_data(f, None)
        if entity_or_draft is None:
            entity_class = EntityFactory(
                self.entity_type,
                self.settings['MANDATORY_PROPERTIES'],
                self.settings['DEFAULT_TEMPLATE'],
                OvhEntity)
            try:
                entity_or_draft = self.readers.read_file(
                    base_path=self.path, path=f,
                    content_class=entity_class, context=self.context,
                    preread_signal=ovh_entity_subgenerator_preread,
                    preread_sender=self,
                    context_signal=ovh_entity_subgenerator_context,
                    context_sender=self)
            except Exception as e:
                logger.error('Could not process %s\n%s', f, e,
                             exc_info=self.settings.get('DEBUG', False))
                self._add_failed_source_path(f)
                continue

            if not contents.is_valid_content(entity_or_draft, f):
                self._add_failed_source_path(f)
                continue

            known_statuses = ("published", "draft")
            if entity_or_draft.status.lower() not in known_statuses:
                logger.warning(
                    "Unknown status '%s' for file %s, skipping it.",
                    entity_or_draft.status, f)
                self._add_failed_source_path(f)
                continue

            self.cache_data(f, entity_or_draft)

        if entity_or_draft.status.lower() == "published":
            all_entities.append(entity_or_draft)
        else:
            all_drafts.append(entity_or_draft)

        self.add_legacy_id_path(entity_or_draft)
        self.add_source_path(entity_or_draft)

    self.entities, self.translations = process_translations(all_entities)
    self.drafts, self.drafts_translations = \
        process_translations(all_drafts)

    sorter = self.settings["SORTER"]
    sorter(self.entities)

    # and generate the output :)
    # order the categories per name
    self.categories = []
    self.authors = []

    self.save_cache()
    self.readers.save_cache()
def generate_context(self):
    """change the context"""
    # return the list of files to use
    files = self.get_files(self.path, exclude=['pages'])
    all_articles = []
    for f in files:
        content, metadata = read_file(f)

        # if no category is set, use the name of the path as a category
        if 'category' not in metadata.keys():
            if os.path.dirname(f) == self.path:
                category = self.settings['DEFAULT_CATEGORY']
            else:
                category = os.path.basename(os.path.dirname(f))

            if category != '':
                metadata['category'] = unicode(category)

        if 'date' not in metadata.keys() \
                and self.settings['FALLBACK_ON_FS_DATE']:
            metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

        article = Article(content, metadata, settings=self.settings,
                          filename=f)
        if not is_valid_content(article, f):
            continue

        if article.status == "published":
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)
        elif article.status == "draft":
            self.drafts.append(article)

    self.articles, self.translations = process_translations(all_articles)
    for article in self.articles:
        # only main articles are listed in categories, not translations
        self.categories[article.category].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['REVERSE_ARCHIVE_ORDER'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = map(itemgetter(1), tag_cloud)
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud'))
def generate_context(self):
    """Add the articles into the shared context"""
    all_articles = []
    all_drafts = []
    for f in self.get_files(
            self.settings['ARTICLE_PATHS'],
            exclude=self.settings['ARTICLE_EXCLUDES']):
        article = self.get_cached_data(f, None)
        if article is None:
            try:
                article = self.readers.read_file(
                    base_path=self.path, path=f, content_class=Article,
                    context=self.context,
                    preread_signal=signals.article_generator_preread,
                    preread_sender=self,
                    context_signal=signals.article_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.error('Could not process %s\n%s', f, e,
                             exc_info=self.settings.get('DEBUG', False))
                self._add_failed_source_path(f)
                continue

            if not is_valid_content(article, f):
                self._add_failed_source_path(f)
                continue

            self.cache_data(f, article)

        self.add_source_path(article)

        if article.status.lower() == "published":
            all_articles.append(article)
        elif article.status.lower() == "draft":
            draft = self.readers.read_file(
                base_path=self.path, path=f, content_class=Draft,
                context=self.context,
                preread_signal=signals.article_generator_preread,
                preread_sender=self,
                context_signal=signals.article_generator_context,
                context_sender=self)
            self.add_source_path(draft)
            all_drafts.append(draft)
        else:
            logger.error("Unknown status '%s' for file %s, skipping it.",
                         article.status, f)

    self.articles, self.translations = process_translations(
        all_articles, order_by=self.settings['ARTICLE_ORDER_BY'])
    self.drafts, self.drafts_translations = \
        process_translations(all_drafts)

    signals.article_generator_pretaxonomy.send(self)

    for article in self.articles:
        # only main articles are listed in categories and tags,
        # not translations
        self.categories[article.category].append(article)
        if hasattr(article, 'tags'):
            for tag in article.tags:
                self.tags[tag].append(article)
        for author in getattr(article, 'authors', []):
            self.authors[author].append(article)

    # sort the articles by date
    self.articles.sort(key=attrgetter('date'), reverse=True)
    self.dates = list(self.articles)
    self.dates.sort(key=attrgetter('date'),
                    reverse=self.context['NEWEST_FIRST_ARCHIVES'])

    # create tag cloud
    tag_cloud = defaultdict(int)
    for article in self.articles:
        for tag in getattr(article, 'tags', []):
            tag_cloud[tag] += 1

    tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
    tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

    tags = list(map(itemgetter(1), tag_cloud))
    if tags:
        max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(math.floor(steps - (steps - 1) * math.log(count)
                            / (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

    # and generate the output :)
    # order the categories per name
    self.categories = list(self.categories.items())
    self.categories.sort(
        reverse=self.settings['REVERSE_CATEGORY_ORDER'])

    self.authors = list(self.authors.items())
    self.authors.sort()

    self._update_context(('articles', 'dates', 'tags', 'categories',
                          'tag_cloud', 'authors', 'related_posts'))

    self.save_cache()
    self.readers.save_cache()
    signals.article_generator_finalized.send(self)
def generate_context(self): """change the context""" # return the list of files to use files = self.get_files(self.path, exclude=[ 'pages', ]) all_articles = [] for f in files: try: content, metadata = read_file(f, settings=self.settings) except Exception, e: warning(u'Could not process %s\n%s' % (f, str(e))) continue # if no category is set, use the name of the path as a category if 'category' not in metadata.keys(): if os.path.dirname(f) == self.path: category = self.settings['DEFAULT_CATEGORY'] else: category = os.path.basename( os.path.dirname(f)).decode('utf-8') if category != '': metadata['category'] = unicode(category) if 'date' not in metadata.keys()\ and self.settings['FALLBACK_ON_FS_DATE']: metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime) article = Article(content, metadata, settings=self.settings, filename=f) if not is_valid_content(article, f): continue add_to_url = u'' if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings: article_permalink_structure = self.settings[ 'ARTICLE_PERMALINK_STRUCTURE'] article_permalink_structure = article_permalink_structure.lstrip( '/').replace('%(', "%%(") # try to substitute any python datetime directive add_to_url = article.date.strftime(article_permalink_structure) # try to substitute any article metadata in rest file add_to_url = add_to_url % article.__dict__ add_to_url = [slugify(i) for i in add_to_url.split('/')] add_to_url = os.path.join(*add_to_url) article.url = urlparse.urljoin(add_to_url, article.url) article.save_as = urlparse.urljoin(add_to_url, article.save_as) if article.status == "published": if hasattr(article, 'tags'): for tag in article.tags: self.tags[tag].append(article) all_articles.append(article) elif article.status == "draft": self.drafts.append(article)
def generate_context(self): """Add the articles into the shared context""" article_path = os.path.normpath( # we have to remove trailing slashes os.path.join(self.path, self.settings['ARTICLE_DIR']) ) all_articles = [] for f in self.get_files( article_path, exclude=self.settings['ARTICLE_EXCLUDES']): try: signals.article_generate_preread.send(self) content, metadata = read_file(f, settings=self.settings) except Exception as e: logger.warning(u'Could not process %s\n%s' % (f, str(e))) continue # if no category is set, use the name of the path as a category if 'category' not in metadata: if (self.settings['USE_FOLDER_AS_CATEGORY'] and os.path.dirname(f) != article_path): # if the article is in a subdirectory category = os.path.basename(os.path.dirname(f))\ .decode('utf-8') else: # if the article is not in a subdirectory category = self.settings['DEFAULT_CATEGORY'] if category != '': metadata['category'] = Category(category, self.settings) if 'date' not in metadata and self.settings.get('DEFAULT_DATE'): if self.settings['DEFAULT_DATE'] == 'fs': metadata['date'] = datetime.datetime.fromtimestamp( os.stat(f).st_ctime) else: metadata['date'] = datetime.datetime( *self.settings['DEFAULT_DATE']) signals.article_generate_context.send(self, metadata=metadata) article = Article(content, metadata, settings=self.settings, filename=f, context=self.context) if not is_valid_content(article, f): continue self.add_filename(article) if article.status == "published": if hasattr(article, 'tags'): for tag in article.tags: self.tags[tag].append(article) all_articles.append(article) elif article.status == "draft": self.drafts.append(article) else: logger.warning(u"Unknown status %s for file %s, skipping it." % (repr(unicode.encode(article.status, 'utf-8')), repr(f))) self.articles, self.translations = process_translations(all_articles) for article in self.articles: # only main articles are listed in categories, not translations self.categories[article.category].append(article) # ignore blank authors as well as undefined if hasattr(article,'author') and article.author.name != '': self.authors[article.author].append(article) # sort the articles by date self.articles.sort(key=attrgetter('date'), reverse=True) self.dates = list(self.articles) self.dates.sort(key=attrgetter('date'), reverse=self.context['NEWEST_FIRST_ARCHIVES']) # create tag cloud tag_cloud = defaultdict(int) for article in self.articles: for tag in getattr(article, 'tags', []): tag_cloud[tag] += 1 tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True) tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')] tags = map(itemgetter(1), tag_cloud) if tags: max_count = max(tags) steps = self.settings.get('TAG_CLOUD_STEPS') # calculate word sizes self.tag_cloud = [ ( tag, int(math.floor(steps - (steps - 1) * math.log(count) / (math.log(max_count)or 1))) ) for tag, count in tag_cloud ] # put words in chaos random.shuffle(self.tag_cloud) # and generate the output :) # order the categories per name self.categories = list(self.categories.items()) self.categories.sort( key=lambda item: item[0].name, reverse=self.settings['REVERSE_CATEGORY_ORDER']) self.authors = list(self.authors.items()) self.authors.sort(key=lambda item: item[0].name) self._update_context(('articles', 'dates', 'tags', 'categories', 'tag_cloud', 'authors', 'related_posts')) signals.article_generator_finalized.send(self)
def generate_context(self): """Add the entities into the shared context""" all_entities = [] all_drafts = [] for f in self.get_files( self.settings['PATHS'], exclude=self.settings['EXCLUDES']): entity_or_draft = self.get_cached_data(f, None) if entity_or_draft is None: entity_class = EntityFactory( self.entity_type, self.settings['MANDATORY_PROPERTIES'], self.settings['DEFAULT_TEMPLATE']) try: entity_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=entity_class, context=self.context, preread_signal=entity_subgenerator_preread, preread_sender=self, context_signal=entity_subgenerator_context, context_sender=self) except Exception as e: logger.error('Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue if not contents.is_valid_content(entity_or_draft, f): self._add_failed_source_path(f) continue if entity_or_draft.status.lower() == "published": all_entities.append(entity_or_draft) elif entity_or_draft.status.lower() == "draft": all_drafts.append(entity_or_draft) self.add_source_path(entity_or_draft) continue else: logger.warning("Unknown status '%s' for file %s, skipping it.", entity_or_draft.status, f) self._add_failed_source_path(f) continue self.cache_data(f, entity_or_draft) self.add_source_path(entity_or_draft) self.entities, self.translations = process_translations( all_entities) self.drafts, self.drafts_translations = \ process_translations(all_drafts) sorter = self.settings["SORTER"] sorter(self.entities) entity_subgenerator_pretaxonomy.send(self) for entity_or_draft in self.entities: # only main entities are listed in categories and tags # not translations if hasattr(entity_or_draft, 'category'): self.categories[entity_or_draft.category].append(entity_or_draft) if hasattr(entity_or_draft, 'tags'): for tag in entity_or_draft.tags: self.tags[tag].append(entity_or_draft) for author in getattr(entity_or_draft, 'authors', []): self.authors[author].append(entity_or_draft) # and generate the output :) # order the categories per name self.categories = list(self.categories.items()) self.categories.sort( reverse=self.settings['REVERSE_CATEGORY_ORDER']) self.authors = list(self.authors.items()) self.authors.sort() self.save_cache() self.readers.save_cache() entity_subgenerator_finalized.send(self)
def generate_context(self): """Add the articles into the shared context""" all_articles = [] all_drafts = [] for f in self.get_files( self.settings['ARTICLE_PATHS'], exclude=self.settings['ARTICLE_EXCLUDES']): article_or_draft = self.get_cached_data(f, None) if article_or_draft is None: #TODO needs overhaul, maybe nomad for read_file solution, unified behaviour try: article_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=Article, context=self.context, preread_signal=signals.article_generator_preread, preread_sender=self, context_signal=signals.article_generator_context, context_sender=self) except Exception as e: logger.error('Could not process %s\n%s', f, e, exc_info=self.settings.get('DEBUG', False)) self._add_failed_source_path(f) continue if not is_valid_content(article_or_draft, f): self._add_failed_source_path(f) continue if article_or_draft.status.lower() == "published": all_articles.append(article_or_draft) elif article_or_draft.status.lower() == "draft": article_or_draft = self.readers.read_file( base_path=self.path, path=f, content_class=Draft, context=self.context, preread_signal=signals.article_generator_preread, preread_sender=self, context_signal=signals.article_generator_context, context_sender=self) self.add_source_path(article_or_draft) all_drafts.append(article_or_draft) else: logger.error("Unknown status '%s' for file %s, skipping it.", article_or_draft.status, f) self._add_failed_source_path(f) continue self.cache_data(f, article_or_draft) self.add_source_path(article_or_draft) self.articles, self.translations = process_translations(all_articles, order_by=self.settings['ARTICLE_ORDER_BY']) self.drafts, self.drafts_translations = \ process_translations(all_drafts) signals.article_generator_pretaxonomy.send(self) for article in self.articles: # only main articles are listed in categories and tags # not translations self.categories[article.category].append(article) if hasattr(article, 'tags'): for tag in article.tags: self.tags[tag].append(article) for author in getattr(article, 'authors', []): self.authors[author].append(article) self.dates = list(self.articles) self.dates.sort(key=attrgetter('date'), reverse=self.context['NEWEST_FIRST_ARCHIVES']) # and generate the output :) # order the categories per name self.categories = list(self.categories.items()) self.categories.sort( reverse=self.settings['REVERSE_CATEGORY_ORDER']) self.authors = list(self.authors.items()) self.authors.sort() self._update_context(('articles', 'dates', 'tags', 'categories', 'authors', 'related_posts', 'drafts')) self.save_cache() self.readers.save_cache() signals.article_generator_finalized.send(self)