Example #1
    def generate_context(self):
        self.staticfiles = []

        # walk static paths
        for static_path in self.settings['STATIC_PATHS']:
            for f in self.get_files(os.path.join(self.path, static_path),
                                    extensions=False):
                f_rel = os.path.relpath(f, self.path)
                content, metadata = read_file(f,
                                              fmt='static',
                                              settings=self.settings)
                # TODO remove this hardcoded 'static' subdirectory
                metadata['save_as'] = os.path.join('static', f_rel)
                metadata['url'] = pelican.utils.path_to_url(
                    metadata['save_as'])
                sc = Static(content=None,
                            metadata=metadata,
                            settings=self.settings,
                            source_path=f_rel)
                self.staticfiles.append(sc)
                self.add_source_path(sc)
        # same thing for FILES_TO_COPY
        for src, dest in self.settings['FILES_TO_COPY']:
            content, metadata = read_file(src,
                                          fmt='static',
                                          settings=self.settings)
            metadata['save_as'] = dest
            metadata['url'] = pelican.utils.path_to_url(metadata['save_as'])
            sc = Static(content=None,
                        metadata=metadata,  # pass the full dict so the 'url' computed above is kept
                        settings=self.settings,
                        source_path=src)
            self.staticfiles.append(sc)
            self.add_source_path(sc)
Example #2
    def generate_context(self):
        self.staticfiles = []

        # walk static paths
        for static_path in self.settings['STATIC_PATHS']:
            for f in self.get_files(
                    os.path.join(self.path, static_path), extensions=False):
                f_rel = os.path.relpath(f, self.path)
                content, metadata = read_file(
                    f, fmt='static', settings=self.settings)
                # TODO remove this hardcoded 'static' subdirectory
                metadata['save_as'] = os.path.join('static', f_rel)
                metadata['url'] = pelican.utils.path_to_url(metadata['save_as'])
                sc = Static(
                    content=None,
                    metadata=metadata,
                    settings=self.settings,
                    source_path=f_rel)
                self.staticfiles.append(sc)
                self.add_source_path(sc)
        # same thing for FILES_TO_COPY
        for src, dest in self.settings['FILES_TO_COPY']:
            content, metadata = read_file(
                src, fmt='static', settings=self.settings)
            metadata['save_as'] = dest
            metadata['url'] = pelican.utils.path_to_url(metadata['save_as'])
            sc = Static(
                content=None,
                metadata=metadata,  # pass the full dict so the 'url' computed above is kept
                settings=self.settings,
                source_path=src)
            self.staticfiles.append(sc)
            self.add_source_path(sc)
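Examples #1 and #2 are the same generator method with different line wrapping; both use the older module-level `read_file` API that returns a `(content, metadata)` tuple. As a rough standalone sketch of that call shape for a static file (the exact signature varies across Pelican releases, and the `content/images/logo.png` path is hypothetical):

    import os
    import pelican.utils
    from pelican.readers import read_file  # old-style API: returns (content, metadata)

    base_path = 'content'  # hypothetical project source root
    source = os.path.join(base_path, 'images', 'logo.png')

    # fmt='static' records the file without trying to parse it as markup
    content, metadata = read_file(source, fmt='static', settings=None)
    metadata['save_as'] = os.path.relpath(source, base_path)
    metadata['url'] = pelican.utils.path_to_url(metadata['save_as'])
    print(metadata['url'])  # e.g. 'images/logo.png'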
Example #3
    def generate_context(self):
        all_pages = []
        hidden_pages = []
        for f in self.get_files(
                os.path.join(self.path, self.settings['PAGE_DIR']),
                exclude=self.settings['PAGE_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue
            signals.pages_generate_context.send(self, metadata=metadata)
            page = Page(content, metadata, settings=self.settings,
                        filename=f, context=self.context)
            if not is_valid_content(page, f):
                continue

            self.add_filename(page)

            if page.status == "published":
                all_pages.append(page)
            elif page.status == "hidden":
                hidden_pages.append(page)
            else:
                logger.warning(u"Unknown status %s for file %s, skipping it." %
                               (repr(unicode.encode(page.status, 'utf-8')),
                                repr(f)))
Example #4
    def generate_context(self):
        all_sessions = []
        for f in self.get_files(
                os.path.join(self.path, self.settings['SESSION_DIR']),
                exclude=self.settings['SESSION_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
                split_and_strip(metadata, 'speakers')
                split_and_strip(metadata, 'bios')
            except Exception as e:
                logger.error(u'Could not process %s\n%s' % (f, unicode(e)))
                continue
            session = Session(content, metadata, settings=self.settings,
                              source_path=f, context=self.context)
            if not is_valid_content(session, f):
                continue

            self.add_source_path(session)

            if session.status == "published":
                if hasattr(session, 'tags'):
                    for tag in session.tags:
                        conference.sessions.by_tag[tag].append(session)
                conference.add_session(session)
            elif session.status == "draft":
                self.drafts.append(session)
            else:
                logger.error(u"Unknown status %s for file %s, skipping it." %
                               (repr(unicode.encode(session.status, 'utf-8')),
                                repr(f)))
Example #5
    def generate_context(self):
        all_pages = []
        hidden_pages = []
        for f in self.get_files(
                os.path.join(self.path, self.settings['PAGE_DIR']),
                exclude=self.settings['PAGE_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue
            signals.pages_generate_context.send(self, metadata=metadata)
            page = Page(content, metadata, settings=self.settings,
                        filename=f, context=self.context)
            if not is_valid_content(page, f):
                continue

            self.add_filename(page)

            if page.status == "published":
                all_pages.append(page)
            elif page.status == "hidden":
                hidden_pages.append(page)
            else:
                logger.warning(u"Unknown status %s for file %s, skipping it." %
                               (repr(unicode.encode(page.status, 'utf-8')),
                                repr(f)))

        self.pages, self.translations = process_translations(all_pages)
        self.hidden_pages, self.hidden_translations = process_translations(hidden_pages)

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages
Example #6
    def generate_context(self):
        all_pages = []
        hidden_pages = []
        for f in self.get_files(
            os.path.join(self.path, self.settings["PAGE_DIR"]), exclude=self.settings["PAGE_EXCLUDES"]
        ):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning("Could not process %s\n%s" % (f, str(e)))
                continue
            signals.pages_generate_context.send(self, metadata=metadata)
            page = Page(content, metadata, settings=self.settings, source_path=f, context=self.context)
            if not is_valid_content(page, f):
                continue

            self.add_source_path(page)

            if page.status == "published":
                all_pages.append(page)
            elif page.status == "hidden":
                hidden_pages.append(page)
            else:
                logger.warning("Unknown status %s for file %s, skipping it." % (repr(page.status), repr(f)))

        self.pages, self.translations = process_translations(all_pages)
        self.hidden_pages, self.hidden_translations = process_translations(hidden_pages)

        self._update_context(("pages",))
        self.context["PAGES"] = self.pages
Example #7
    def generate_context(self):
        """Add the articles into the shared context"""

        article_path = os.path.normpath(  # we have to remove trailing slashes
            os.path.join(self.path, self.settings['ARTICLE_DIR'])
        )
        all_articles = []
        for f in self.get_files(
                article_path,
                exclude=self.settings['ARTICLE_EXCLUDES']):
            try:
                signals.article_generate_preread.send(self)
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata:

                if (self.settings['USE_FOLDER_AS_CATEGORY']
                    and os.path.dirname(f) != article_path):
                    # if the article is in a subdirectory
                    category = os.path.basename(os.path.dirname(f))\
                        .decode('utf-8')
                else:
                    # if the article is not in a subdirectory
                    category = self.settings['DEFAULT_CATEGORY']

                if category != '':
                    metadata['category'] = Category(category, self.settings)

            if 'date' not in metadata and self.settings.get('DEFAULT_DATE'):
                if self.settings['DEFAULT_DATE'] == 'fs':
                    metadata['date'] = datetime.datetime.fromtimestamp(
                            os.stat(f).st_ctime)
                else:
                    metadata['date'] = datetime.datetime(
                            *self.settings['DEFAULT_DATE'])

            signals.article_generate_context.send(self, metadata=metadata)
            article = Article(content, metadata, settings=self.settings,
                              filename=f, context=self.context)
            if not is_valid_content(article, f):
                continue

            self.add_filename(article)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            else:
                logger.warning(u"Unknown status %s for file %s, skipping it." %
                               (repr(unicode.encode(article.status, 'utf-8')),
                                repr(f)))
Example #9
 def assert_rst_equal(self, rstfile, expectations):
     filename = join(RESOURCES_PATH, rstfile)
     content, _ = readers.read_file(filename)
     content = content.strip().replace('\n', '')
     extracted_parts = RE_EXTRACT.findall(content)
     self.assertEqual(len(extracted_parts), len(expectations))
     for expected, extracted in zip(expectations, extracted_parts):
         self.assertEqual(extracted, expected)
Example #10
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=['pages',])
        all_articles = []
        for f in files:

            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f)).decode('utf-8')

                if category != '':
                    metadata['category'] = unicode(category)

            if ('date' not in metadata
                    and self.settings['FALLBACK_ON_FS_DATE']):
                metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadata, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            add_to_url = u''
            if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
                article_permalink_structure = self.settings['ARTICLE_PERMALINK_STRUCTURE']
                article_permalink_structure = article_permalink_structure.lstrip('/').replace('%(', "%%(")

                # try to substitute any python datetime directive
                add_to_url = article.date.strftime(article_permalink_structure)
                # try to substitute any article metadata in rest file
                add_to_url = add_to_url % article.__dict__
                add_to_url = [slugify(i) for i in add_to_url.split('/')]
                add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            elif article.status == "noindex":
                self.noindex.append(article)
Example #11
def publish_draft(path):
  from pelican.readers import read_file
  print(path)
  _, meta = read_file(path, settings={})

  dir_for_post = os.path.join(POST_DIRECTORY, today_path(meta['date']))
  ensure_dir(dir_for_post)
  print("publishing post: %s into directory: %s" % (path, dir_for_post))
  import shutil
  shutil.move(path, dir_for_post)
Example #12
    def test_typogrify(self):
        # if nothing is specified in the settings, the content should be
        # unmodified
        content, _ = readers.read_file(_filename('article.rst'))
        expected = "<p>This is some content. With some stuff to "\
                   "&quot;typogrify&quot;.</p>\n"

        self.assertEqual(content, expected)

        try:
            # otherwise, typogrify should be applied
            content, _ = readers.read_file(_filename('article.rst'),
                                           settings={'TYPOGRIFY': True})
            expected = "<p>This is some content. With some stuff to&nbsp;"\
                       "&#8220;typogrify&#8221;.</p>\n"

            self.assertEqual(content, expected)
        except ImportError:
            self.skipTest('need the typogrify distribution')
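Example #12's `TYPOGRIFY` setting routes the rendered HTML through the third-party `typogrify` package before it is returned. A rough sketch of the underlying filter call (the package's public entry point, not the reader's exact wiring):

    from typogrify.filters import typogrify  # pip install typogrify

    html = '<p>This is some content. With some stuff to "typogrify".</p>'
    # typogrify() applies smart quotes, a non-breaking space before the
    # last word of a paragraph, and similar typographic touch-ups.
    print(typogrify(html))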
Example #13
    def test_article_with_filename_metadata(self):
        content, metadata = readers.read_file(
                _path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={})
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
            'title': 'Rst with filename metadata',
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)

        content, metadata = readers.read_file(
                _path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2}).*'
                    })
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
            'title': 'Rst with filename metadata',
            'date': datetime.datetime(2012, 11, 29),
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)

        content, metadata = readers.read_file(
                _path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2})'
                                         r'_(?P<Slug>.*)'
                                         r'#(?P<MyMeta>.*)-(?P<author>.*)'
                    })
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
            'title': 'Rst with filename metadata',
            'date': datetime.datetime(2012, 11, 29),
            'slug': 'article_with_filename_metadata',
            'mymeta': 'foo',
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)
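The `FILENAME_METADATA` regex in Example #13 (and in #14, #18 and #19 below) is matched against the source filename, and each named group is merged into the metadata under a lowercased key, which is why `(?P<MyMeta>...)` surfaces as `mymeta`; the reader also coerces the `date` string into a `datetime`. A hypothetical sketch of just the extraction step:

    import re

    FILENAME_METADATA = (r'(?P<date>\d{4}-\d{2}-\d{2})'
                         r'_(?P<Slug>.*)#(?P<MyMeta>.*)-(?P<author>.*)')
    name = '2012-11-29_rst_w_filename_meta#foo-bar'  # extension already stripped

    match = re.match(FILENAME_METADATA, name)
    metadata = {key.lower(): value for key, value in match.groupdict().items()}
    # -> {'date': '2012-11-29', 'slug': 'rst_w_filename_meta',
    #     'mymeta': 'foo', 'author': 'bar'}

Note that the assertions above still expect the slug from the file's own metadata, which takes precedence over filename-derived values.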
Example #14
    def test_article_with_filename_metadata(self):
        content, metadata = readers.read_file(
                _filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={})
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
            'title': 'Rst with filename metadata',
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)

        content, metadata = readers.read_file(
                _filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2}).*'
                    })
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
            'title': 'Rst with filename metadata',
            'date': datetime.datetime(2012, 11, 29),
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)

        content, metadata = readers.read_file(
                _filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2})'
                                         r'_(?P<Slug>.*)'
                                         r'#(?P<MyMeta>.*)-(?P<author>.*)'
                    })
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
            'title': 'Rst with filename metadata',
            'date': datetime.datetime(2012, 11, 29),
            'slug': 'article_with_filename_metadata',
            'mymeta': 'foo',
        }
        for key, value in metadata.items():
            self.assertEqual(value, expected[key], key)
Example #16
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=[
            'pages',
        ])
        all_articles = []
        for f in files:
            content, metadatas = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadatas.keys():
                category = os.path.basename(os.path.dirname(f))

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']

                if category != '':
                    metadatas['category'] = unicode(category)

            if ('date' not in metadatas
                    and self.settings['FALLBACK_ON_FS_DATE']):
                metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content,
                              metadatas,
                              settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['REVERSE_ARCHIVE_ORDER'])
        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(
            reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))
        self._update_context(('articles', 'dates', 'tags', 'categories'))
Example #17
 def generate_context(self):
     all_pages = []
     for f in self.get_files(os.sep.join((self.path, 'pages'))):
         try:
             content, metadata = read_file(f)
         except Exception as e:
             error(u'Could not process %s\n%s' % (f, str(e)))
             continue
         page = Page(content, metadata, settings=self.settings, filename=f)
         if not is_valid_content(page, f):
             continue
         all_pages.append(page)
Example #18
    def test_article_with_filename_metadata(self):
        content, metadata = readers.read_file(
                _filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={})
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)

        content, metadata = readers.read_file(
                _filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2}).*'
                    })
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
            'date': datetime.datetime(2012, 11, 30),
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)

        content, metadata = readers.read_file(
                _filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2})'
                                         r'_(?P<Slug>.*)'
                                         r'#(?P<MyMeta>.*)-(?P<author>.*)'
                    })
        expected = {
            'category': 'yeah',
            'author': u'Alexis Métaireau',
            'date': datetime.datetime(2012, 11, 30),
            'slug': 'md_w_filename_meta',
            'mymeta': 'foo',
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)
Example #19
    def test_article_with_filename_metadata(self):
        content, metadata = readers.read_file(
                _path('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={})
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)

        content, metadata = readers.read_file(
                _path('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2}).*'
                    })
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
            'date': datetime.datetime(2012, 11, 30),
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)

        content, metadata = readers.read_file(
                _path('2012-11-30_md_w_filename_meta#foo-bar.md'),
                settings={
                    'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2})'
                                         r'_(?P<Slug>.*)'
                                         r'#(?P<MyMeta>.*)-(?P<author>.*)'
                    })
        expected = {
            'category': 'yeah',
            'author': 'Alexis Métaireau',
            'date': datetime.datetime(2012, 11, 30),
            'slug': 'md_w_filename_meta',
            'mymeta': 'foo',
        }
        for key, value in expected.items():
            self.assertEqual(value, metadata[key], key)
Example #20
 def generate_context(self):
     all_pages = []
     for f in self.get_files(os.sep.join((self.path, 'pages'))):
         try:
             content, metadata = read_file(f)
         except Exception as e:
             error(u'Could not process %s\n%s' % (f, str(e)))
             continue
         page = Page(content, metadata, settings=self.settings,
                     filename=f)
         if not is_valid_content(page, f):
             continue
         all_pages.append(page)
Example #21
    def generate_context(self):
        all_pages = []
        for f in self.get_files(os.sep.join((self.path, 'pages'))):
            content, metadatas = read_file(f)
            page = Page(content, metadatas, settings=self.settings, filename=f)
            if not is_valid_content(page, f):
                continue
            all_pages.append(page)

        self.pages, self.translations = process_translations(all_pages)

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages
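Example #21 (like #16 above and #22 below) hands every collected object to `process_translations`, which, as the unpacking shows, splits a mixed list into default-language originals and their translations, matched by slug and language. A hedged sketch of that contract with two hypothetical pages (bare `Page` construction may need more metadata or settings in some Pelican versions):

    from pelican.contents import Page
    from pelican.utils import process_translations

    # Two hypothetical pages sharing a slug but differing in language.
    en = Page('<p>Hello</p>', {'title': 'About', 'slug': 'about', 'lang': 'en'})
    fr = Page('<p>Bonjour</p>', {'title': 'À propos', 'slug': 'about', 'lang': 'fr'})

    pages, translations = process_translations([en, fr])
    # With the default language set to 'en', `pages` keeps the English
    # original and `translations` holds the French variant.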
Example #22
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=['pages',])
        all_articles = []
        for f in files:
            content, metadatas = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadatas.keys():
                category = os.path.basename(os.path.dirname(f))

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']

                if category != '':
                    metadatas['category'] = unicode(category)

            if ('date' not in metadatas
                    and self.settings['FALLBACK_ON_FS_DATE']):
                metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadatas, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)


        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['REVERSE_ARCHIVE_ORDER'])
        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))
        self._update_context(('articles', 'dates', 'tags', 'categories'))
Example #23
 def generate_context(self):
     all_pages = []
     for f in self.get_files(
         os.path.join(self.path, self.settings["PAGE_DIR"]), exclude=self.settings["PAGE_EXCLUDES"]
     ):
         try:
             content, metadata = read_file(f)
         except Exception as e:
             logger.error(u"Could not process %s\n%s" % (f, str(e)))
             continue
         page = Page(content, metadata, settings=self.settings, filename=f)
         if not is_valid_content(page, f):
             continue
         all_pages.append(page)
Example #24
    def test_typogrify(self):
        # if nothing is specified in the settings, the content should be
        # unmodified
        content, _ = readers.read_file(_filename('article.rst'))
        expected = "<p>This is some content. With some stuff to "\
                   "&quot;typogrify&quot;.</p>\n<p>Now with added "\
                   'support for <abbr title="three letter acronym">'\
                   'TLA</abbr>.</p>\n'

        self.assertEqual(content, expected)

        try:
            # otherwise, typogrify should be applied
            content, _ = readers.read_file(_filename('article.rst'),
                                           settings={'TYPOGRIFY': True})
            expected = u"<p>This is some content. With some stuff to&nbsp;"\
                       "&#8220;typogrify&#8221;.</p>\n<p>Now with added "\
                       'support for <abbr title="three letter acronym">'\
                       '<span class="caps">TLA</span></abbr>.</p>\n'

            self.assertEqual(content, expected)
        except ImportError:
            self.skipTest('need the typogrify distribution')
Example #25
    def generate_context(self):
        all_pages = []
        for f in self.get_files(os.sep.join((self.path, 'pages'))):
            content, metadata = read_file(f)
            page = Page(content, metadata, settings=self.settings,
                        filename=f)
            if not is_valid_content(page, f):
                continue
            all_pages.append(page)

        self.pages, self.translations = process_translations(all_pages)

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages
Example #27
 def generate_context(self):
     all_pages = []
     for f in self.get_files(os.path.join(self.path,
                                          self.settings['PAGE_DIR']),
                             exclude=self.settings['PAGE_EXCLUDES']):
         try:
             content, metadata = read_file(f)
         except Exception as e:
             logger.error(u'Could not process %s\n%s' % (f, str(e)))
             continue
         page = Page(content, metadata, settings=self.settings, filename=f)
         if not is_valid_content(page, f):
             continue
         all_pages.append(page)
Example #28
    def generate_context(self):
        """change the context"""

        article_path = os.path.normpath(  # we have to remove trailing slashes
            os.path.join(self.path, self.settings["ARTICLE_DIR"])
        )
        all_articles = []
        for f in self.get_files(article_path, exclude=self.settings["ARTICLE_EXCLUDES"]):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u"Could not process %s\n%s" % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if "category" not in metadata:

                if os.path.dirname(f) == article_path:  # if the article is not in a subdirectory
                    category = self.settings["DEFAULT_CATEGORY"]
                else:
                    category = os.path.basename(os.path.dirname(f)).decode("utf-8")

                if category != "":
                    metadata["category"] = Category(category, self.settings)

            if "date" not in metadata and self.settings["DEFAULT_DATE"]:
                if self.settings["DEFAULT_DATE"] == "fs":
                    metadata["date"] = datetime.datetime.fromtimestamp(os.stat(f).st_ctime)
                else:
                    metadata["date"] = datetime.datetime(*self.settings["DEFAULT_DATE"])

            signals.article_generate_context.send(self, metadata=metadata)
            article = Article(content, metadata, settings=self.settings, filename=f)
            if not is_valid_content(article, f):
                continue

            if article.status == "published":
                if hasattr(article, "tags"):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            else:
                logger.warning(
                    u"Unknown status %s for file %s, skipping it."
                    % (repr(unicode.encode(article.status, "utf-8")), repr(f))
                )
Example #29
    def generate_context(self):
        self.staticfiles = []

        # walk static paths
        for static_path in self.settings['STATIC_PATHS']:
            for f in self.get_files(
                    static_path, extensions=False):
                static = read_file(
                    base_path=self.path, path=f, content_class=Static,
                    fmt='static',
                    settings=self.settings, context=self.context,
                    preread_signal=signals.static_generator_preread,
                    preread_sender=self,
                    context_signal=signals.static_generator_context,
                    context_sender=self)
                self.staticfiles.append(static)
                self.add_source_path(static)
Example #30
    def test_article_with_markdown_markup_extension(self):
        # test to ensure the markdown markup extension is being processed as expected
        content, metadata = readers.read_file(
            _path('article_with_markdown_markup_extensions.md'),
            settings={'MD_EXTENSIONS': ['toc', 'codehilite', 'extra']})
        expected = '<div class="toc">\n'\
            '<ul>\n'\
            '<li><a href="#level1">Level1</a><ul>\n'\
            '<li><a href="#level2">Level2</a></li>\n'\
            '</ul>\n'\
            '</li>\n'\
            '</ul>\n'\
            '</div>\n'\
            '<h2 id="level1">Level1</h2>\n'\
            '<h3 id="level2">Level2</h3>'

        self.assertEqual(content, expected)
Example #31
    def test_article_with_markdown_markup_extension(self):
        # test to ensure the markdown markup extension is being processed as expected
        content, metadata = readers.read_file(
                _filename('article_with_markdown_markup_extensions.md'),
                settings={'MD_EXTENSIONS': ['toc', 'codehilite', 'extra']})
        expected = '<div class="toc">\n'\
            '<ul>\n'\
            '<li><a href="#level1">Level1</a><ul>\n'\
            '<li><a href="#level2">Level2</a></li>\n'\
            '</ul>\n'\
            '</li>\n'\
            '</ul>\n'\
            '</div>\n'\
            '<h2 id="level1">Level1</h2>\n'\
            '<h3 id="level2">Level2</h3>'

        self.assertEqual(content, expected)
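Examples #30 and #31 exercise `MD_EXTENSIONS`, which is handed to the Markdown library's extension list; the `toc` extension replaces a `[TOC]` marker in the source with the `<div class="toc">` block asserted above and adds the `id` attributes to the headings. Roughly, as a sketch of the library call rather than the reader itself:

    import markdown  # the Python-Markdown package

    md = markdown.Markdown(extensions=['toc', 'codehilite', 'extra'])
    print(md.convert('[TOC]\n\n## Level1\n\n### Level2'))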
Example #32
    def generate_context(self):
        for f in self.get_files(
                os.path.join(self.path, self.settings['BIO_DIR']),
                exclude=self.settings['BIO_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
                split_and_strip(metadata, 'roles')
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue
            bio = Bio(content, metadata, settings=self.settings,
                      source_path=f, context=self.context)
            if not is_valid_content(bio, f):
                continue

            self.add_source_path(bio)

            conference.add_bio(bio)
Example #33
 def generate_context(self):
     bio_pic_path = self.settings['BIO_PIC_PATH']
     for f in self.get_files(
             os.path.join(self.path, bio_pic_path), extensions=False):
         f_rel = os.path.relpath(f, self.path)
         content, metadata = read_file(
             f, fmt='static', settings=self.settings)
         basename = os.path.splitext(os.path.basename(f))[0]
         metadata['save_as'] = f_rel
         metadata['url'] = path_to_url(metadata['save_as'])
         metadata['slug'] = slugify(basename)
         sc = BioPic(
             content=None,
             metadata=metadata,
             settings=self.settings,
             source_path=f_rel)
         conference.bio_pics[sc.slug] = sc
         self.add_source_path(sc)
Example #34
    def generate_context(self):
        self.staticfiles = []

        # walk static paths
        for static_path in self.settings['STATIC_PATHS']:
            for f in self.get_files(static_path, extensions=False):
                static = read_file(
                    base_path=self.path,
                    path=f,
                    content_class=Static,
                    fmt='static',
                    settings=self.settings,
                    context=self.context,
                    preread_signal=signals.static_generator_preread,
                    preread_sender=self,
                    context_signal=signals.static_generator_context,
                    context_sender=self)
                self.staticfiles.append(static)
                self.add_source_path(static)
Example #35
    def generate_context(self):
        """change the context"""

        article_path = os.path.join(self.path, self.settings['ARTICLE_DIR'])
        all_articles = []
        for f in self.get_files(
                article_path,
                exclude=self.settings['ARTICLE_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata:

                if os.path.dirname(f) == article_path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f))\
                                .decode('utf-8')

                if category != '':
                    metadata['category'] = Category(category, self.settings)

            if 'date' not in metadata and self.settings['FALLBACK_ON_FS_DATE']:
                metadata['date'] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)

            article = Article(content, metadata, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
Example #36
    def generate_context(self):
        all_pages = []
        for f in self.get_files(os.sep.join((self.path, 'pages'))):
            content, metadata = read_file(f)
            page = Page(content, metadata, settings=self.settings,
                        filename=f)
            if not is_valid_content(page, f):
                continue

            if self.settings.get('CLEAN_URLS_NO_PROXY'):
                # cleaning page url
                page.save_as = os.path.join(page.slug, 'index.html')
                page.url = os.path.dirname(page.save_as) + '/'

            all_pages.append(page)

        self.pages, self.translations = process_translations(all_pages)

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages
Example #37
    def generate_context(self):
        all_pages = []

        pagecat_map = dict(self.settings.get('PAGECAT_MAP'))
        for f in self.get_files(
                os.path.join(self.path, self.settings['PAGE_DIR']),
                exclude=self.settings['PAGE_EXCLUDES']):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                print(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no sorting is set, set default to 99
            if 'sorting' not in metadata.keys():
                metadata['sorting'] = 99

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = 'NO_CATEGORY'
                else:
                    category = os.path.basename(os.path.dirname(f)).decode('utf-8')

                if category != '':
                    title = pagecat_map.get(category)
                    metadata['category'] = PagesCategory(
                            category, self.settings, title)

            page = Page(content, metadata, settings=self.settings,
                        filename=f)
            if not is_valid_content(page, f):
                continue

            # all pages which status is not "published" will be ignored
            if page.status == "published":
                all_pages.append(page)
            else:
                self.ignored_pages.append(page)
Example #38
    def generate_context(self):
        all_pages = []
        hidden_pages = []
        for f in self.get_files(
                self.settings['PAGE_DIR'],
                exclude=self.settings['PAGE_EXCLUDES']):
            try:
                page = read_file(
                    base_path=self.path, path=f, content_class=Page,
                    settings=self.settings, context=self.context,
                    preread_signal=signals.page_generator_preread,
                    preread_sender=self,
                    context_signal=signals.page_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(page, f):
                continue

            self.add_source_path(page)

            if page.status == "published":
                all_pages.append(page)
            elif page.status == "hidden":
                hidden_pages.append(page)
            else:
                logger.warning("Unknown status %s for file %s, skipping it." %
                               (repr(page.status),
                                repr(f)))

        self.pages, self.translations = process_translations(all_pages)
        self.hidden_pages, self.hidden_translations = (
                process_translations(hidden_pages))

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages

        signals.page_generator_finalized.send(self)
Example #39
    def generate_context(self):
        all_pages = []
        hidden_pages = []
        for f in self.get_files(self.settings['PAGE_DIR'],
                                exclude=self.settings['PAGE_EXCLUDES']):
            try:
                page = read_file(base_path=self.path,
                                 path=f,
                                 content_class=Page,
                                 settings=self.settings,
                                 context=self.context,
                                 preread_signal=signals.page_generator_preread,
                                 preread_sender=self,
                                 context_signal=signals.page_generator_context,
                                 context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(page, f):
                continue

            self.add_source_path(page)

            if page.status == "published":
                all_pages.append(page)
            elif page.status == "hidden":
                hidden_pages.append(page)
            else:
                logger.warning("Unknown status %s for file %s, skipping it." %
                               (repr(page.status), repr(f)))

        self.pages, self.translations = process_translations(all_pages)
        self.hidden_pages, self.hidden_translations = (
            process_translations(hidden_pages))

        self._update_context(('pages', ))
        self.context['PAGES'] = self.pages

        signals.page_generator_finalized.send(self)
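Examples #38 and #39 (and #42, #46 below) use the later keyword-based `read_file` API: rather than returning a `(content, metadata)` tuple, it resolves a reader, parses the file and returns an instance of the given `content_class`, firing the optional pre-read and context signals along the way. A minimal sketch of a bare call with that shape (hypothetical paths; judging by the snippets, the signal and context arguments are optional):

    from pelican.contents import Page
    from pelican.readers import read_file

    page = read_file(
        base_path='content',      # project source root (hypothetical)
        path='pages/about.rst',   # source file, relative to base_path
        content_class=Page,       # class to instantiate and return
        settings=None)            # normally the generator's settings dict
    print(page.status)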
Example #40
    def generate_context(self):
        candidates_path = os.path.normpath(
            os.path.join(self.path, 'candidates')
        )

        for f in self.get_files(candidates_path):
            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception:
                # skip files that cannot be read or parsed
                continue

            # Position is determined by directory
            position = os.path.basename(os.path.dirname(f)).decode('utf-8')
            metadata['position'] = position

            candidate = Candidate(content, metadata, settings=self.settings, filename=f)

            if not is_valid_content(candidate, f):
                continue

            if position not in self.candidates:
                self.candidates[position] = []

            self.candidates[position].append(candidate)
Example #41
 def read_file(self, path, **kwargs):
     # Isolate from future API changes to readers.read_file
     return readers.read_file(_path(path), settings=get_settings(**kwargs))
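Example #41 (and the variants in #44 and #45, the latter targeting the newer keyword API) wraps `readers.read_file` in a one-line test helper so a future signature change only needs absorbing in one place: keyword arguments are translated into a settings dict by the `get_settings` helper, so a test can write, say, `self.read_file('article.rst', TYPOGRIFY=True)` instead of calling the module function directly.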
Example #42
    def generate_context(self):
        """Add the articles into the shared context"""

        all_articles = []
        for f in self.get_files(
                self.settings['ARTICLE_DIR'],
                exclude=self.settings['ARTICLE_EXCLUDES']):
            try:
                article = read_file(
                    base_path=self.path, path=f, content_class=Article,
                    settings=self.settings, context=self.context,
                    preread_signal=signals.article_generator_preread,
                    preread_sender=self,
                    context_signal=signals.article_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(article, f):
                continue

            self.add_source_path(article)

            if article.status == "published":
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            else:
                logger.warning("Unknown status %s for file %s, skipping it." %
                               (repr(article.status),
                                repr(f)))

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories and tags
            # not translations
            self.categories[article.category].append(article)
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            # ignore blank authors as well as undefined
            if hasattr(article, 'author') and article.author.name != '':
                self.authors[article.author].append(article)


        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['NEWEST_FIRST_ARCHIVES'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = list(map(itemgetter(1), tag_cloud))
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (
                tag,
                int(math.floor(steps - (steps - 1) * math.log(count)
                    / (math.log(max_count) or 1)))
            )
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(
                reverse=self.settings['REVERSE_CATEGORY_ORDER'])

        self.authors = list(self.authors.items())
        self.authors.sort()

        self._update_context(('articles', 'dates', 'tags', 'categories',
                              'tag_cloud', 'authors', 'related_posts'))

        signals.article_generator_finalized.send(self)
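The tag-cloud block in Example #42 (repeated in #43) maps each tag's article count onto `TAG_CLOUD_STEPS` discrete buckets on a logarithmic scale: bucket 1 for the most frequent tag, bucket `steps` for the rarest, with the `or 1` guarding the case where the maximum count is 1 and its logarithm would be zero. A quick standalone check of the same formula, with assumed values:

    import math

    def tag_size(count, max_count, steps=4):
        # the same expression used in generate_context above
        return int(math.floor(steps - (steps - 1) * math.log(count)
                              / (math.log(max_count) or 1)))

    assert tag_size(100, 100) == 1  # most frequent tag -> bucket 1
    assert tag_size(1, 100) == 4    # rarest tag -> bucket `steps`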
Example #43
    def generate_context(self):
        """Add the articles into the shared context"""

        article_path = os.path.normpath(  # we have to remove trailing slashes
            os.path.join(self.path, self.settings['ARTICLE_DIR'])
        )
        all_articles = []
        for f in self.get_files(
                article_path,
                exclude=self.settings['ARTICLE_EXCLUDES']):
            try:
                signals.article_generate_preread.send(self)
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                logger.warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata:

                if (self.settings['USE_FOLDER_AS_CATEGORY']
                    and os.path.dirname(f) != article_path):
                    # if the article is in a subdirectory
                    category = os.path.basename(os.path.dirname(f))\
                        .decode('utf-8')
                else:
                    # if the article is not in a subdirectory
                    category = self.settings['DEFAULT_CATEGORY']

                if category != '':
                    metadata['category'] = Category(category, self.settings)

            if 'date' not in metadata and self.settings.get('DEFAULT_DATE'):
                if self.settings['DEFAULT_DATE'] == 'fs':
                    metadata['date'] = datetime.datetime.fromtimestamp(
                            os.stat(f).st_ctime)
                else:
                    metadata['date'] = datetime.datetime(
                            *self.settings['DEFAULT_DATE'])

            signals.article_generate_context.send(self, metadata=metadata)
            article = Article(content, metadata, settings=self.settings,
                              filename=f, context=self.context)
            if not is_valid_content(article, f):
                continue

            self.add_filename(article)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            else:
                logger.warning(u"Unknown status %s for file %s, skipping it." %
                               (repr(unicode.encode(article.status, 'utf-8')),
                                repr(f)))

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)
            # ignore blank authors as well as undefined
            if hasattr(article, 'author') and article.author.name != '':
                self.authors[article.author].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                reverse=self.context['NEWEST_FIRST_ARCHIVES'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = map(itemgetter(1), tag_cloud)
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (
                tag,
                int(math.floor(steps - (steps - 1) * math.log(count)
                    / (math.log(max_count) or 1)))
            )
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(
                key=lambda item: item[0].name,
                reverse=self.settings['REVERSE_CATEGORY_ORDER'])

        self.authors = list(self.authors.items())
        self.authors.sort(key=lambda item: item[0].name)

        self._update_context(('articles', 'dates', 'tags', 'categories',
                              'tag_cloud', 'authors', 'related_posts'))

        signals.article_generator_finalized.send(self)
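The DEFAULT_DATE branch above accepts two shapes of setting: the literal string 'fs', which falls back to the source file's ctime, or a tuple that is splatted into datetime.datetime(). A minimal standalone sketch of that logic; the function name and sample values are assumptions, not from the source.

import datetime
import os

def default_date(path, setting):
    # Mirrors the fallback above: 'fs' means "use the filesystem ctime",
    # anything else is assumed to be a (year, month, day, ...) tuple.
    if setting == 'fs':
        return datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
    return datetime.datetime(*setting)

# default_date('content/article.rst', 'fs')         -> ctime of the file
# default_date('content/article.rst', (2013, 4, 2)) -> datetime.datetime(2013, 4, 2, 0, 0)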
Ejemplo n.º 44
0
    def read_file(self, path, **kwargs):
        # Isolate from future API changes to readers.read_file
        return readers.read_file(
            _path(path), settings=get_settings(**kwargs))
Ejemplo n.º 45
0
    def read_file(self, path, **kwargs):
        # Isolate from future API changes to readers.read_file
        return readers.read_file(base_path=CONTENT_PATH,
                                 path=path,
                                 settings=get_settings(**kwargs))
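These two helpers read like unit-test fixtures: extra keyword arguments are forwarded to get_settings(), so a single call both reads a file and overrides settings. A hedged sketch of how such a helper is typically used, with the test-case name, content directory, file name, and assertion all assumed for illustration:

import unittest

from pelican import readers
from pelican.tests.support import get_settings  # assumed import location

CONTENT_PATH = 'content'  # assumed content directory

class ReaderTest(unittest.TestCase):
    def read_file(self, path, **kwargs):
        # same pattern as above: kwargs become settings overrides
        return readers.read_file(base_path=CONTENT_PATH, path=path,
                                 settings=get_settings(**kwargs))

    def test_reads_article(self):
        page = self.read_file(path='article.rst', DEFAULT_CATEGORY='misc')
        self.assertTrue(hasattr(page, 'content'))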
Ejemplo n.º 46
0
    def generate_context(self):
        """Add the articles into the shared context"""

        all_articles = []
        for f in self.get_files(self.settings['ARTICLE_DIR'],
                                exclude=self.settings['ARTICLE_EXCLUDES']):
            try:
                article = read_file(
                    base_path=self.path,
                    path=f,
                    content_class=Article,
                    settings=self.settings,
                    context=self.context,
                    preread_signal=signals.article_generator_preread,
                    preread_sender=self,
                    context_signal=signals.article_generator_context,
                    context_sender=self)
            except Exception as e:
                logger.warning('Could not process {}\n{}'.format(f, e))
                continue

            if not is_valid_content(article, f):
                continue

            self.add_source_path(article)

            if article.status == "published":
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            else:
                logger.warning("Unknown status %s for file %s, skipping it." %
                               (repr(article.status), repr(f)))

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories and tags
            # not translations
            self.categories[article.category].append(article)
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            # ignore blank authors as well as undefined
            if hasattr(article, 'author') and article.author.name != '':
                self.authors[article.author].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['NEWEST_FIRST_ARCHIVES'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = list(map(itemgetter(1), tag_cloud))
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(
                 math.floor(steps - (steps - 1) * math.log(count) /
                            (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(reverse=self.settings['REVERSE_CATEGORY_ORDER'])

        self.authors = list(self.authors.items())
        self.authors.sort()

        self._update_context(('articles', 'dates', 'tags', 'categories',
                              'tag_cloud', 'authors', 'related_posts'))

        signals.article_generator_finalized.send(self)
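The read_file call in this variant threads preread and context signals through the reader, which is what lets plugins hook each article as it is processed. A minimal plugin sketch under that assumption; the receiver names and the custom 'reviewed' metadata key are invented for illustration:

from pelican import signals

def log_preread(generator):
    # article_generator_preread fires once per file, before it is read
    print('about to read a file from', generator.path)

def mark_unreviewed(generator, metadata):
    # article_generator_context fires after parsing, before the Article
    # object is built, so metadata can still be adjusted here
    metadata.setdefault('reviewed', 'no')

def register():
    # standard Pelican plugin entry point
    signals.article_generator_preread.connect(log_preread)
    signals.article_generator_context.connect(mark_unreviewed)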
Ejemplo n.º 47
0
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=[
            'pages',
        ])
        all_articles = []
        for f in files:
            content, metadatas = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadatas.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f))

                if category != '':
                    metadatas['category'] = unicode(category)

            if 'date' not in metadatas.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content,
                              metadatas,
                              settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            all_articles.append(article)

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['REVERSE_ARCHIVE_ORDER'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = map(itemgetter(1), tag_cloud)
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(
                 math.floor(steps - (steps - 1) * math.log(count) /
                            (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)
        self._update_context(
            ('articles', 'dates', 'tags', 'categories', 'tag_cloud'))
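A portability note on the `tags = map(itemgetter(1), tag_cloud)` / `if tags:` pattern used in this and several neighbouring snippets: it is correct on Python 2, where map() returns a list, but on Python 3 a map object is always truthy and is exhausted by the first max() call, which is why the newer variant above wraps it in list(map(...)). A short demonstration on Python 3:

from operator import itemgetter

tags = map(itemgetter(1), [])   # no tags at all
print(bool(tags))               # True on Python 3: a map object is always truthy
print(bool(list(tags)))         # False, which is what the `if tags:` check intends

tags = map(itemgetter(1), [('python', 3), ('misc', 1)])
print(max(tags))                # 3 -- but this consumes the iterator
print(list(tags))               # [] -- already exhausted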
Ejemplo n.º 48
0
def on_content_object_init(content):
    if isinstance(content, pelican.contents.Static):
        return

    source_path = content.source_path
    repository = discover_repository(os.path.dirname(source_path))
    if not repository:
        return

    repository_file_path = make_repository_file_path(repository, source_path)
    commits = list(repository.iter_commits(paths=repository_file_path))

    # Make the list of commits more intuitive to work with
    commits.reverse()

    if not commits:
        return

    first_commit = commits[0]
    latest_commit = commits[-1]

    # This is a revision, always overwrite the published and updated date
    if hasattr(content, 'git'):
        created = pelican.utils.SafeDatetime.fromtimestamp(content.git.commit.authored_date)
        content.date = created
        content.locale_date = pelican.utils.strftime(created, content.date_format)

        previous_revision = content.git.previous_revision
        if not previous_revision:
            return

        # The next revision isn't available at this point, only previous ones due to the order in which Pelican's
        # content_object_init signal is sent. Instead the modified date of the previous revision is set to the created
        # date of this one
        previous_revision.modified = created
        previous_revision.locale_modified = pelican.utils.strftime(created, content.date_format)

        # The current content is a revision, skip creating and setting the git object, since it was already done when
        # processing the first article
        return

    # TODO: Possibly add configuration values to override this behavior
    if not hasattr(content, 'date'):
        created = pelican.utils.SafeDatetime.fromtimestamp(first_commit.authored_date)
        content.date = created
        content.locale_date = pelican.utils.strftime(created, content.date_format)

    if first_commit != latest_commit and not hasattr(content, 'modified'):
        modified = pelican.utils.SafeDatetime.fromtimestamp(latest_commit.authored_date)
        content.modified = modified
        content.locale_modified = pelican.utils.strftime(modified, content.date_format)

    readers = pelican.readers.Readers(content.settings)
    revisions = []
    for commit in commits:
        blob = commit.tree / repository_file_path
        format_ = os.path.splitext(repository_file_path)[1][1:]

        file_descriptor, path = tempfile.mkstemp()
        with os.fdopen(file_descriptor, 'wb') as file:
            file.write(blob.data_stream.read())

        revision = readers.read_file(
            content.settings['PATH'], path, content.__class__, format_, context=content._context
        )
        revision.source_path = content.source_path

        filename = os.path.basename(content.save_as)
        root, extension = os.path.splitext(filename)
        url = content.url

        # This will create URLs in the following format: "<root>/<hexsha>/", where <root> is the last component of the
        # original path with its extension stripped (if available)
        revision.override_save_as = posixpath.join(
            url if url.endswith('/') else posixpath.dirname(url), root if root != 'index' else '', commit.hexsha,
            'index' + extension
        )
        revision.override_url = posixpath.dirname(revision.save_as)

        revisions.append(revision)

    revision_count = len(revisions)
    for index, (commit, revision) in enumerate(zip(commits, revisions)):
        previous_revision = revisions[index-1] if index > 0 else None

        # If this is the second to last revision, point next_revision to the actually generated content instead.
        # Expected behavior is to be directed to the original content URL when attempting to navigate to the latest
        # revision
        next_revision = content if index == revision_count-2 else (
            revisions[index+1] if index < revision_count-1 else None
        )

        revision.git = git_class(commit, repository_file_path, revisions, previous_revision, next_revision)

        # TODO: Find a way to prevent sending this signal twice (previously sent during creation with read_file)
        pelican.signals.content_object_init.send(revision)

    # There's only an actual previous revision if there was more than one commit, i.e. more than one revision
    previous_revision = revisions[-2] if revision_count > 1 else None
    content.git = git_class(latest_commit, repository_file_path, revisions, previous_revision, None)
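To actually run the handler above, a Pelican plugin would connect it to the content_object_init signal in its register() entry point. A minimal sketch, assuming on_content_object_init is importable as defined above:

import pelican.signals

def register():
    # fire the git-history handler every time a content object is initialised
    pelican.signals.content_object_init.connect(on_content_object_init)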
Ejemplo n.º 49
0
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=['pages',])
        all_articles = []
        for f in files:
            content, metadata = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f))

                if category != '':
                    metadata['category'] = unicode(category)

            if 'date' not in metadata.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadata, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                reverse=self.context['REVERSE_ARCHIVE_ORDER'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = map(itemgetter(1), tag_cloud)
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (
                tag,
                int(
                    math.floor(steps - (steps - 1) * math.log(count) / (math.log(max_count) or 1))
                )
            )
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))
        self._update_context(('articles', 'dates', 'tags', 'categories', 'tag_cloud'))
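Every variant in this listing hands its article list to process_translations, which conceptually groups content by slug, keeps the DEFAULT_LANG version as the main article, and files the rest as translations. A simplified sketch of that behaviour; the real implementation also warns about duplicate or untranslated slugs, and the function name below marks it as a model, not the original:

from collections import defaultdict

def process_translations_sketch(contents, default_lang='en'):
    by_slug = defaultdict(list)
    for item in contents:
        by_slug[item.slug].append(item)

    mains, translations = [], []
    for group in by_slug.values():
        # prefer the default-language version as the "main" article
        main = next((c for c in group if c.lang == default_lang), group[0])
        main.translations = [c for c in group if c is not main]
        mains.append(main)
        translations.extend(main.translations)
    return mains, translations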
Ejemplo n.º 50
0
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=[
            'pages',
        ])
        all_articles = []
        for f in files:
            content, metadata = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f))

                if category != '':
                    metadata['category'] = unicode(category)

            if 'date' not in metadata.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content,
                              metadata,
                              settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            add_to_url = u''
            if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
                article_permalink_structure = self.settings[
                    'ARTICLE_PERMALINK_STRUCTURE']
                article_permalink_structure = article_permalink_structure.lstrip(
                    '/')

                # try to substitute any python datetime directive
                add_to_url = article.date.strftime(article_permalink_structure)
                # try to substitute any article metadata in rest file
                add_to_url = add_to_url % article.__dict__
                add_to_url = [slugify(i) for i in add_to_url.split('/')]
                add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)

        self.articles, self.translations = process_translations(all_articles)

        for article in self.articles:
            # only main articles are listed in categories, not translations
            self.categories[article.category].append(article)
            self.authors[article.author].append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['REVERSE_ARCHIVE_ORDER'])

        # create tag cloud
        tag_cloud = defaultdict(int)
        for article in self.articles:
            for tag in getattr(article, 'tags', []):
                tag_cloud[tag] += 1

        tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
        tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]

        tags = map(itemgetter(1), tag_cloud)
        if tags:
            max_count = max(tags)
        steps = self.settings.get('TAG_CLOUD_STEPS')

        # calculate word sizes
        self.tag_cloud = [
            (tag,
             int(
                 math.floor(steps - (steps - 1) * math.log(count) /
                            (math.log(max_count) or 1))))
            for tag, count in tag_cloud
        ]
        # put words in chaos
        random.shuffle(self.tag_cloud)

        # and generate the output :)

        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(
            reverse=self.settings.get('REVERSE_CATEGORY_ORDER'))

        self.authors = list(self.authors.items())
        self.authors.sort()

        self._update_context(('articles', 'dates', 'tags', 'categories',
                              'tag_cloud', 'authors'))
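The ARTICLE_PERMALINK_STRUCTURE block above performs two substitution passes: strftime() first expands the date directives, then %-formatting fills in article metadata, and each path component is finally run through slugify(). The next example additionally escapes '%(' as '%%(' so strftime leaves the metadata placeholders intact. A worked sketch with made-up values, omitting the slugify step:

import datetime

date = datetime.datetime(2013, 4, 2)
meta = {'slug': 'hello-world'}

structure = '/%Y/%m/%(slug)s'.lstrip('/').replace('%(', '%%(')
add_to_url = date.strftime(structure)  # pass 1: '2013/04/%(slug)s'
add_to_url = add_to_url % meta         # pass 2: '2013/04/hello-world'
print(add_to_url)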
Ejemplo n.º 51
0
    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=[
            'pages',
        ])
        all_articles = []
        for f in files:

            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(
                        os.path.dirname(f)).decode('utf-8')

                if category != '':
                    metadata['category'] = unicode(category)

            if 'date' not in metadata.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content,
                              metadata,
                              settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            add_to_url = u''
            if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
                article_permalink_structure = self.settings[
                    'ARTICLE_PERMALINK_STRUCTURE']
                article_permalink_structure = article_permalink_structure.lstrip(
                    '/').replace('%(', "%%(")

                # try to substitute any python datetime directive
                add_to_url = article.date.strftime(article_permalink_structure)
                # try to substitute any article metadata in rest file
                add_to_url = add_to_url % article.__dict__
                add_to_url = [slugify(i) for i in add_to_url.split('/')]
                add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)