Example 1
def render(self, context):
    try:
        cls = ''
        url = self.href
        page = context['page']
        if self.is_relative_link(url):
            if url.startswith('_files/'):
                filename = file_url_to_name(url)
                url = reverse('pages:file-info',
                              args=[page.pretty_slug, filename])
                try:
                    file = PageFile.objects.get(slug__exact=page.slug,
                                                name__exact=filename)
                    cls = ' class="file_%s"' % file.rough_type
                except PageFile.DoesNotExist:
                    cls = ' class="missing_link"'
            else:
                try:
                    page = Page.objects.get(slug__exact=slugify(url))
                    url = reverse('pages:show', args=[page.pretty_slug])
                except Page.DoesNotExist:
                    cls = ' class="missing_link"'
                    # Convert to proper URL: My%20page -> My_page
                    url = name_to_url(url_to_name(url))
                    url = reverse('pages:show', args=[url])
        return '<a href="%s"%s>%s</a>' % (url, cls,
                                          self.nodelist.render(context))
    except Exception:
        # Fail quietly: a broken link should never take down the page render.
        return ''
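The special handling above only fires for wiki-internal hrefs. The project's actual `is_relative_link` helper isn't shown in this excerpt; a minimal sketch of such a check, under that assumption, could be:

from urlparse import urlparse  # urllib.parse on Python 3

def is_relative_link(url):
    # Sketch: an href is wiki-internal when it has no scheme or host
    # and is not a bare anchor or a site-absolute path.
    parsed = urlparse(url)
    return (not parsed.scheme and not parsed.netloc
            and not url.startswith('#') and not url.startswith('/'))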
Example 2
def extract_included_tags(html):
    """
    Args:
        html: A string containing an HTML5 fragment.

    Returns:
        A list of the included tag slugs (lowercased).
    """
    from tags.models import slugify

    parser = html5lib.HTMLParser(
        tree=html5lib.treebuilders.getTreeBuilder("lxml"),
        namespaceHTMLElements=False)
    # Wrap to make the tree lookup easier
    tree = parser.parseFragment('<div>%s</div>' % html)[0]
    a_s = tree.xpath('//a')

    # Grab the link target if it's an included tag
    tags = []
    for a in a_s:
        if _is_included_tag(a):
            try:
                name = url_to_name(a.attrib.get('href'))
                item = slugify(name[TAGS_PATH_LEN:].lower())
            except UnicodeDecodeError:
                continue
            tags.append(item)
    return tags
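A quick usage sketch. This excerpt doesn't show what markup `_is_included_tag` accepts, so the class attribute and the `tags/` prefix below (with `TAGS_PATH_LEN` equal to its length) are assumptions:

html = '<a class="plugin includetag" href="tags/Parks">Parks</a>'
print(extract_included_tags(html))
# -> ['parks'], assuming slugify() leaves a plain lowercase word alone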
Example 3
def __init__(self, parser, token, *args, **kwargs):
    super(IncludePageNode, self).__init__(*args, **kwargs)
    bits = token.split_contents()
    if len(bits) < 2:
        raise template.TemplateSyntaxError(
            '%r tag requires at least one argument'
            % token.contents.split()[0])
    page_name = bits[1]
    if is_quoted(page_name):
        page_name = unescape_string_literal(page_name)
    self.page_name = url_to_name(page_name)
    self.args = bits[2:]
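Django's `split_contents()` keeps a quoted page name together as one argument, which is why the `is_quoted` / `unescape_string_literal` pair is needed afterwards. It is a thin wrapper around `smart_split`, which you can try directly (the `include_page` tag name here is only illustrative):

from django.utils.text import smart_split

print(list(smart_split('include_page "Front Page" showtitle')))
# -> ['include_page', '"Front Page"', 'showtitle']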
Example 4
def extract_included_pagenames(html):
    """
    Args:
        html: A string containing an HTML5 fragment.

    Returns:
        A list of the included page names.
    """
    parser = html5lib.HTMLParser(
        tree=html5lib.treebuilders.getTreeBuilder("lxml"),
        namespaceHTMLElements=False)
    # Wrap to make the tree lookup easier
    tree = parser.parseFragment('<div>%s</div>' % html)[0]
    a_s = tree.xpath('//a')

    # Grab the link target if it's an included page
    names = []
    for a in a_s:
        if _is_included_page(a):
            names.append(url_to_name(a.attrib.get('href')))
    return names
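Usage sketch, with the same caveat: the markup `_is_included_page` matches isn't shown here, so the class attribute is an assumption, while the underscore-to-space decoding follows `url_to_name` as tested in Example 6:

html = '<a class="plugin includepage" href="Front_Page">Front Page</a>'
print(extract_included_pagenames(html))   # -> ['Front Page']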
Example 5
def extract_internal_links(html):
    """
    Args:
        html: A string containing an HTML5 fragment.

    Returns:
        A dictionary of the linked-to page names and the number of times that
        link has been made in this HTML.  E.g.
        {'Downtown Park': 3, 'Rollercoaster': 1}
    """
    parser = html5lib.HTMLParser(
        tree=html5lib.treebuilders.getTreeBuilder("lxml"),
        namespaceHTMLElements=False)
    # Wrap to make the tree lookup easier
    tree = parser.parseFragment('<div>%s</div>' % html)[0]
    a_s = tree.xpath('//a')

    # Grab the links if they're not anchors or external.
    d = {}
    for a in a_s:
        if 'href' not in a.attrib:
            continue
        href = a.attrib['href']
        if (not _is_absolute(href) and not _is_anchor_link(href) and
                not _is_plugin(a) and not _invalid(href)):
            try:
                slug = slugify(href)
                name, count = d.get(slug, (url_to_name(href), 0))
                d[slug] = (name, count + 1)
            except UnicodeDecodeError:
                pass

    # Format the result correctly: keyed by display name.
    links = {}
    for _, (name, count) in d.items():
        links[name] = count

    return links
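For example, per the docstring's contract (anchors, externals, and plugin links are skipped; repeats are counted), with made-up page names:

html = ('<a href="Downtown_Park">the park</a>'
        '<a href="Downtown_Park">again</a>'
        '<a href="Rollercoaster">whee</a>'
        '<a href="#history">same-page anchor</a>'
        '<a href="http://example.com/">external</a>')
print(extract_internal_links(html))
# -> {'Downtown Park': 2, 'Rollercoaster': 1}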
Example 6
def test_url_to_name(self):
    self.assertEqual(url_to_name('Front_Page'), 'Front Page')
    self.assertEqual(url_to_name('Ben_%26_Jerry%27s'), "Ben & Jerry's")
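These assertions pin down the decoding: underscores become spaces and percent-escapes are resolved. A minimal sketch consistent with them (not necessarily the project's real implementation; it would mangle names containing literal underscores):

from urllib import unquote  # urllib.parse.unquote on Python 3

def url_to_name(url):
    # 'Ben_%26_Jerry%27s' -> "Ben & Jerry's"
    return unquote(url).replace('_', ' ')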