def add_category(generator):
    tag = Tag('mozilla', generator.settings)
    for article in generator.articles:
        if not hasattr(article, 'tags'):
            setattr(article, 'tags', [
                tag,
            ])
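
This hook only runs if it is connected to one of Pelican's plugin signals; the snippet does not show that part, so the registration below is a sketch that assumes `article_generator_finalized` is the intended signal and adds the `Tag` import the function relies on.

from pelican import signals
from pelican.contents import Tag  # used by add_category above


def register():
    # Assumed hook: fire after all articles have been generated.
    signals.article_generator_finalized.connect(add_category)
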
Example #2
    def _parse_metadata(self, meta):
        """Parse and sanitize metadata"""
        _DEL = object()  # Used as a sentinel
        FCNS = {
            'tags': lambda x, y: [Tag(t, y) for t in self._to_list(x)] or _DEL,
            'date': lambda x, y: get_date(x) if x else _DEL,
            'modified': lambda x, y: get_date(x) if x else _DEL,
            'category': lambda x, y: Category(x, y) if x else _DEL,
            'author': lambda x, y: Author(x, y) if x else _DEL,
            'authors':
            lambda x, y: [Author(a, y) for a in self._to_list(x)] or _DEL,
            'default': lambda x, y: x
        }

        out = {}
        for k, v in meta.items():
            k = k.lower()
            if k in self.settings['FORMATTED_FIELDS']:
                self._md.reset()
                temp = self._md.convert("\n".join(self._to_list(v)))
            else:
                temp = FCNS.get(k, FCNS["default"])(v, self.settings)

            if temp is not _DEL:
                out[k] = temp
        return out
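
The `_DEL` object works as a sentinel: a processor can return it to mean "drop this key entirely", which an ordinary falsy value such as `''` or `[]` could not signal unambiguously. A minimal, self-contained illustration of the same pattern (names here are illustrative, not from Pelican):

_DEL = object()  # unique sentinel; never equal to any real metadata value


def clean(raw, processors):
    """Apply per-key processors and drop keys whose processor returns _DEL."""
    out = {}
    for key, value in raw.items():
        result = processors.get(key, lambda v: v)(value)
        if result is not _DEL:
            out[key] = result
    return out


# An empty 'date' disappears, while a falsy but untouched value survives:
print(clean({'date': '', 'title': ''}, {'date': lambda v: v or _DEL}))
# -> {'title': ''}
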
Example #3
    def test_slugify_tags_with_dots(self):
        settings = get_settings()
        settings['TAG_SUBSTITUTIONS'] = [('Fedora QA', 'fedora.qa', True)]
        settings['ARTICLE_URL'] = '{tag}/{slug}/'
        article_kwargs = self._copy_page_kwargs()
        article_kwargs['metadata']['tag'] = Tag('Fedora QA', settings)
        article_kwargs['metadata']['title'] = 'This Week in Fedora QA'
        article_kwargs['settings'] = settings
        article = Article(**article_kwargs)
        self.assertEqual(article.url, 'fedora.qa/this-week-in-fedora-qa/')
Example #4
def _new_tag(tag_text, article, global_tags):
    """
    Given the TAG_TEXT, an ARTICLE it applies to, and the defaultdict
    of GLOBAL_TAGS, add the relevant tags.

    Returns the Tag(...) for TAG_TEXT.
    """
    tag = Tag(tag_text, article.settings)
    tags = getattr(article, 'tags', [])
    tags.append(tag)
    article.tags = list(set(tags))
    global_tags[tag].append(article)

    return tag
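
A sketch of how such a helper is typically driven: a `collections.defaultdict(list)` collects the reverse index from tags to articles while each article's own tag list is extended and de-duplicated. The `articles` iterable and the tag strings below are illustrative assumptions, not part of the original code.

from collections import defaultdict

global_tags = defaultdict(list)  # Tag -> list of articles carrying it

for article in articles:                    # `articles` assumed to exist
    for tag_text in ('python', 'pelican'):  # illustrative tag strings
        _new_tag(tag_text, article, global_tags)

# Each article.tags is now a de-duplicated list, and global_tags maps
# every Tag to the articles it appears on.
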
Example #5
    posixize_path

try:
    from markdown import Markdown
except ImportError:
    Markdown = False  # NOQA

# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
METADATA_PROCESSORS = {
    'tags':
    lambda x, y:
    ([Tag(tag, y) for tag in ensure_metadata_list(x)] or _DISCARD),
    'date':
    lambda x, y: get_date(x.replace('_', ' ')),
    'modified':
    lambda x, y: get_date(x),
    'status':
    lambda x, y: x.strip() or _DISCARD,
    'category':
    lambda x, y: _process_if_nonempty(Category, x, y),
    'author':
    lambda x, y: _process_if_nonempty(Author, x, y),
    'authors':
    lambda x, y:
    ([Author(author, y) for author in ensure_metadata_list(x)] or _DISCARD),
    'slug':
    lambda x, y: x.strip() or _DISCARD,
Example #6
    def replacer(siteurl, m):
        what = m.group('what')
        value = urlparse(m.group('value'))
        path = value.path
        origin = m.group('path')

        # XXX Put this in a different location.
        if what in {'filename', 'attach'}:
            if path.startswith('/'):
                path = path[1:]
            else:
                # relative to the source path of this content
                path = content.get_relative_source_path(
                    os.path.join(content.relative_dir, path))

            if path not in content._context['filenames']:
                unquoted_path = path.replace('%20', ' ')

                if unquoted_path in content._context['filenames']:
                    path = unquoted_path

            linked_content = content._context['filenames'].get(path)
            if linked_content:
                if what == 'attach':
                    if isinstance(linked_content, Static):
                        linked_content.attach_to(content)
                    else:
                        logger.warning(
                            "%s used {attach} link syntax on a "
                            "non-static file. Use {filename} instead.",
                            content.get_relative_source_path())
                origin = '/'.join((siteurl, linked_content.url))
                origin = origin.replace('\\', '/')  # for Windows paths.
            else:
                logger.warning(
                    "Unable to find `%s`, skipping url replacement.",
                    value.geturl(),
                    extra={
                        'limit_msg': ("Other resources were not found "
                                      "and their urls not replaced")
                    })
        elif what == 'category':
            origin = '/'.join((siteurl, Category(path, content.settings).url))
        elif what == 'tag':
            origin = '/'.join((siteurl, Tag(path, content.settings).url))
        elif what == 'index':
            origin = '/'.join((siteurl, content.settings['INDEX_SAVE_AS']))
        elif what == 'author':
            origin = '/'.join((siteurl, Author(path, content.settings).url))
        else:
            logger.warning(
                "Replacement Indicator '%s' not recognized, "
                "skipping replacement", what)

        # keep all other parts, such as query, fragment, etc.
        parts = list(value)
        parts[2] = origin
        origin = urlunparse(parts)

        return ''.join(
            (m.group('markup'), m.group('quote'), origin, m.group('quote')))
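
Because `replacer` takes `siteurl` before the match object, it is clearly meant to be bound first and then handed to `re.sub`. A sketch of that wiring, with an illustrative pattern that exposes the same named groups (`markup`, `quote`, `path`, `what`, `value`); `replacer` closes over `content`, so in the real code this happens inside the same enclosing method, and `text` below is an assumed name for the HTML being rewritten:

    import re
    from functools import partial

    # Illustrative only; Pelican's real intrasite-link regex is more involved.
    links_re = re.compile(
        r"""(?P<markup>\b(?:href|src)\s*=)(?P<quote>["'])"""
        r"""(?P<path>\{(?P<what>\w+)\}(?P<value>[^"']*))(?P=quote)""")

    # re.sub hands each match to the bound replacer, which rebuilds
    # markup + quote + rewritten URL + quote for that match.
    text = links_re.sub(partial(replacer, siteurl), text)
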
Example #7
except ImportError:
    asciidoc = False
try:
    from html import escape
except ImportError:
    from cgi import escape
try:
    from html.parser import HTMLParser
except ImportError:
    from HTMLParser import HTMLParser

from pelican.contents import Category, Tag, Author
from pelican.utils import get_date, pelican_open

METADATA_PROCESSORS = {
    'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
    'date': lambda x, y: get_date(x),
    'status': lambda x, y: x.strip(),
    'category': Category,
    'author': Author,
}


class Reader(object):
    enabled = True
    file_extensions = ['static']
    extensions = None

    def __init__(self, settings):
        self.settings = settings
Example #8
DUPLICATES_DEFINITIONS_ALLOWED = {
    'tags': False,
    'date': False,
    'modified': False,
    'status': False,
    'category': False,
    'author': False,
    'save_as': False,
    'url': False,
    'authors': False,
    'slug': False
}

METADATA_PROCESSORS = {
    'tags': lambda x, y: ([
        Tag(tag, y)
        for tag in ensure_metadata_list(x)
    ] or _DISCARD),
    'date': lambda x, y: get_date(x.replace('_', ' ')),
    'modified': lambda x, y: get_date(x),
    'status': lambda x, y: x.strip() or _DISCARD,
    'category': lambda x, y: _process_if_nonempty(Category, x, y),
    'author': lambda x, y: _process_if_nonempty(Author, x, y),
    'authors': lambda x, y: ([
        Author(author, y)
        for author in ensure_metadata_list(x)
    ] or _DISCARD),
    'slug': lambda x, y: x.strip() or _DISCARD,
}

logger = logging.getLogger(__name__)
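
The boolean flags above presumably tell the readers whether a given metadata key may be defined more than once in a single source file. A purely hypothetical illustration of how such flags could be consulted while collecting raw key/value pairs (not taken from Pelican itself):

def collect_metadata(pairs):
    """Fold (key, value) pairs, honouring the duplicate-definition flags."""
    collected = {}
    for key, value in pairs:
        key = key.lower()
        if key not in collected:
            collected[key] = value
        elif DUPLICATES_DEFINITIONS_ALLOWED.get(key, True):
            # Duplicates allowed: accumulate the values into a list.
            previous = collected[key]
            if not isinstance(previous, list):
                previous = [previous]
            collected[key] = previous + [value]
        # Duplicates not allowed: silently keep the first definition.
    return collected
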
Example #9
    # import the directives to have pygments support
    from pelican import rstdirectives  # NOQA
except ImportError:
    core = False
try:
    from markdown import Markdown
except ImportError:
    Markdown = False  # NOQA
import re

from pelican.contents import Category, Tag, Author
from pelican.utils import get_date, pelican_open

_METADATA_PROCESSORS = {
    'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')],
    'date': lambda x, y: get_date(x),
    'status': lambda x, y: unicode.strip(x),
    'category': Category,
    'author': Author,
}


class Reader(object):
    enabled = True
    extensions = None

    def __init__(self, settings):
        self.settings = settings

    def process_metadata(self, name, value):
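        # (Sketch) The snippet cuts off at the signature above. A typical
        # body dispatches on the processor table; this is an assumption,
        # not the original source:
        if name in _METADATA_PROCESSORS:
            return _METADATA_PROCESSORS[name](value, self.settings)
        return value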
Example #10
logger = logging.getLogger(__name__)

HEADER_RE = re.compile(
    r"^---$"
    r"(?P<metadata>.+?)"
    r"^(?:---|\.\.\.)$"
    r"(?P<content>.*)", re.MULTILINE | re.DOTALL)

DUPES_NOT_ALLOWED = \
    set(k for k, v in DUPLICATES_DEFINITIONS_ALLOWED.items() if not v) - \
    {"tags", "authors"}

_DEL = object()

YAML_METADATA_PROCESSORS = {
    'tags': lambda x, y: [Tag(_strip(t), y) for t in _to_list(x)] or _DEL,
    'date': lambda x, y: _parse_date(x),
    'modified': lambda x, y: _parse_date(x),
    'category': lambda x, y: Category(_strip(x), y) if x else _DEL,
    'author': lambda x, y: Author(_strip(x), y) if x else _DEL,
    'authors':
    lambda x, y: [Author(_strip(a), y) for a in _to_list(x)] or _DEL,
    'slug': lambda x, y: _strip(x) or _DEL,
    'save_as': lambda x, y: _strip(x) or _DEL,
    'status': lambda x, y: _strip(x) or _DEL,
}


def _strip(obj):
    return str(obj if obj is not None else '').strip()
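
A sketch of how `HEADER_RE` and `YAML_METADATA_PROCESSORS` fit together, assuming PyYAML is available; the surrounding reader code is not part of the snippet, so treat this as an illustration of the flow rather than the original implementation:

import yaml  # PyYAML, assumed to be installed


def split_and_process(text, settings):
    """Split '---' front matter from the body and apply the YAML processors."""
    match = HEADER_RE.match(text)
    if not match:
        return {}, text
    raw = yaml.safe_load(match.group('metadata')) or {}
    metadata = {}
    for key, value in raw.items():
        key = key.lower()
        processed = YAML_METADATA_PROCESSORS.get(
            key, lambda x, y: x)(value, settings)
        if processed is not _DEL:
            metadata[key] = processed
    return metadata, match.group('content')
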
Example #11
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()


def _process_if_nonempty(processor, name, settings):
    """Removes extra whitespace from name and applies a metadata processor.
    If name is empty or all whitespace, returns _DISCARD instead.
    """
    name = name.strip()
    return processor(name, settings) if name else _DISCARD


METADATA_PROCESSORS = {
    'tags': lambda x, y: [Tag(tag, y) for tag in strip_split(x)] or _DISCARD,
    'date': lambda x, y: get_date(x.replace('_', ' ')),
    'modified': lambda x, y: get_date(x),
    'status': lambda x, y: x.strip() or _DISCARD,
    'category': lambda x, y: _process_if_nonempty(Category, x, y),
    'author': lambda x, y: _process_if_nonempty(Author, x, y),
    'authors': lambda x, y: [Author(a, y) for a in strip_split(x)] or _DISCARD,
    'slug': lambda x, y: x.strip() or _DISCARD,
}


def _filter_discardable_metadata(metadata):
    """Return a copy of a dict, minus any items marked as discardable."""
    return {name: val for name, val in metadata.items() if val is not _DISCARD}
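
The two halves are meant to be used together: run every raw value through `METADATA_PROCESSORS`, then strip the `_DISCARD` markers before the metadata reaches content objects. A minimal sketch, with the raw dict and the settings object assumed to come from the caller:

def process_all(raw_metadata, settings):
    # Apply the per-key processors; keys without a processor pass through.
    processed = {
        key: METADATA_PROCESSORS.get(key, lambda x, y: x)(value, settings)
        for key, value in raw_metadata.items()
    }
    # Drop every value a processor flagged with _DISCARD.
    return _filter_discardable_metadata(processed)


# e.g. a whitespace-only 'slug' vanishes instead of becoming an empty string:
# process_all({'slug': '   ', 'title': 'Hello'}, settings)
# -> {'title': 'Hello'}
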