Example #1
def render_group(group):
    return tag.ul(
        tag.li(tag(tag.strong(elt[0].strip('/')),
                   render_group(elt[1]))
               if isinstance(elt, tuple)
               else tag.a(wiki.format_page_name(omitprefix(elt)),
                          href=formatter.href.wiki(elt)))
        for elt in group)
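
render_group consumes a nested structure in which a leaf is a plain page name
and a node is a (common prefix, children) tuple, and turns it into nested
ul/li markup. Below is a minimal sketch of that input shape with a plain-string
renderer standing in for the Genshi tag builder; the sample group and the
strip_prefix stand-in are hypothetical.

def render_group_plain(group, strip_prefix=lambda page: page):
    # Same tree walk as render_group above, but emitting an HTML string
    # instead of Genshi elements.
    items = []
    for elt in group:
        if isinstance(elt, tuple):   # node: (common prefix, children)
            items.append('<li><strong>%s</strong>%s</li>'
                         % (elt[0].strip('/'), render_group_plain(elt[1])))
        else:                        # leaf: a page name
            items.append('<li>%s</li>' % strip_prefix(elt))
    return '<ul>%s</ul>' % ''.join(items)

# Made-up sample: one top-level page plus a 'Trac/' subtree.
sample = ['WikiStart', ('Trac/', ['Trac/Guide', 'Trac/Links'])]
print(render_group_plain(sample))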
Example #2
    def _wiki_view(self, req, stream):
        add_stylesheet(req, 'tags/css/tractags.css')
        tags = self._page_tags(req)
        if not tags:
            return stream
        li = []
        for t in tags:
            resource = Resource('tag', t)
            anchor = render_resource_link(self.env, web_context(req, resource),
                                          resource)
            anchor = anchor(rel='tag')
            li.append(tag.li(anchor, ' '))

        # TRANSLATOR: Header label text for tag list at wiki page bottom.
        insert = tag.ul(class_='tags')(tag.li(_("Tags"), class_='header'), li)
        return stream | (
            Transformer('//div[contains(@class,"wikipage")]').after(insert))
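
The stream | Transformer(...) idiom comes from Genshi's transform filter: the
XPath picks the wiki page body and after() injects the freshly built fragment
behind it. A minimal self-contained sketch of the same pattern, assuming
Genshi is installed; the page markup and the tag name are invented for
illustration.

from genshi.builder import tag
from genshi.filters.transform import Transformer

# Stand-in for the rendered wiki page stream (normally produced by Trac).
stream = tag.div('Page body', class_='wikipage').generate()
insert = tag.ul(tag.li('example-tag'), class_='tags')
out = stream | Transformer('//div[contains(@class,"wikipage")]').after(insert)
print(out.render('html', encoding=None))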
Example #3
def render_hierarchy(group):
    return tag.ul(
        tag.li(tag(tag.a(elt[0], href=formatter.href.wiki(elt[1]))
                   if elt[1] else tag(elt[0]),
                   render_hierarchy(elt[2]))
               if len(elt) == 3
               else tag.a('/'.join(elt[0]),
                          href=formatter.href.wiki(elt[1])))
        for elt in group)
Example #4
    def expand_macro(self, formatter, name, content):
        args, kw = parse_args(content)
        prefix = args[0].strip() if args else None
        limit = _arg_as_int(args[1].strip(), min=1) if len(args) > 1 else None
        group = kw.get('group', 'date')

        sql = """SELECT name, max(version) AS max_version,
                        max(time) AS max_time FROM wiki"""
        args = []
        if prefix:
            with self.env.db_query as db:
                sql += " WHERE name %s" % db.prefix_match()
                args.append(db.prefix_match_value(prefix))
        sql += " GROUP BY name ORDER BY max_time DESC"
        if limit:
            sql += " LIMIT %s"
            args.append(limit)

        entries_per_date = []
        prevdate = None
        for name, version, ts in self.env.db_query(sql, args):
            if 'WIKI_VIEW' not in formatter.perm('wiki', name, version):
                continue
            req = formatter.req
            date = user_time(req, format_date, from_utimestamp(ts))
            if date != prevdate:
                prevdate = date
                entries_per_date.append((date, []))
            version = int(version)
            diff_href = None
            if version > 1:
                diff_href = formatter.href.wiki(name, action='diff',
                                                version=version)
            page_name = formatter.wiki.format_page_name(name)
            entries_per_date[-1][1].append((page_name, name, version,
                                            diff_href))

        items_per_date = (
            (date, (tag.li(tag.a(page, href=formatter.href.wiki(name)),
                           tag.small(' (', tag.a(_("diff"), href=diff_href),
                                     ')') if diff_href else None,
                           '\n')
                    for page, name, version, diff_href in entries))
            for date, entries in entries_per_date)

        if group == 'date':
            out = ((tag.h3(date, class_='section'), tag.ul(entries))
                   for date, entries in items_per_date)
        else:
            out = tag.ul(entries for date, entries in items_per_date)
        return tag.div(out, class_="wikipage")
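
The macro walks the query results in descending time order and opens a new
(date, entries) bucket whenever the formatted day changes. The same grouping
pattern in isolation, with made-up timestamps and page names:

import time

changes = [                      # (page name, version, POSIX timestamp)
    ('WikiStart', 4, 1700000000),
    ('TracGuide', 2, 1699990000),
    ('TracLinks', 1, 1699900000),
]

entries_per_date = []
prevdate = None
for name, version, ts in changes:
    date = time.strftime('%Y-%m-%d', time.gmtime(ts))
    if date != prevdate:         # a new day starts a new bucket
        prevdate = date
        entries_per_date.append((date, []))
    entries_per_date[-1][1].append((name, version))

print(entries_per_date)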
Example #5
    def _post_process_request_wiki_view(self, req):
        add_stylesheet(req, 'tags/css/tractags.css')
        tags = self._page_tags(req)
        if not tags:
            return
        li = []
        for t in tags:
            resource = Resource('tag', t)
            anchor = render_resource_link(self.env, web_context(req, resource),
                                          resource)
            anchor = anchor(rel='tag')
            li.append(tag.li(anchor, ' '))

        # TRANSLATOR: Header label text for tag list at wiki page bottom.
        insert = tag.ul(class_='tags')(tag.li(_("Tags"), class_='header'), li)

        filter_lst = []
        # xpath = //div[contains(@class,"wikipage")]
        xform = JTransformer('div[class*=wikipage]')
        filter_lst.append(xform.after(Markup(insert)))

        self._add_jtransform(req, filter_lst)
        return
Example #6
    def expand_macro(self, formatter, name, content):
        curpage = formatter.resource.id

        # scoped TOC (e.g. TranslateRu/TracGuide or 0.11/TracGuide ...)
        prefix = ''
        idx = curpage.find('/')
        if idx > 0:
            prefix = curpage[:idx+1]

        ws = WikiSystem(self.env)
        return tag.div(
            tag.h4(_('Table of Contents')),
            tag.ul([tag.li(tag.a(title, href=formatter.href.wiki(prefix+ref),
                                 class_=(not ws.has_page(prefix+ref) and
                                         'missing')),
                           class_=(prefix+ref == curpage and 'active'))
                    for ref, title in self.TOC]),
            class_='wiki-toc')
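
The scoping trick is plain string slicing: everything up to and including the
first '/' of the current page becomes the prefix of every TOC link. A tiny
demo with hypothetical page names ('TracLinks' stands for one of the refs a
TOC like self.TOC might hold):

for curpage in ('TracGuide', '0.11/TracGuide', 'TranslateRu/TracGuide'):
    idx = curpage.find('/')
    prefix = curpage[:idx + 1] if idx > 0 else ''
    print(curpage, '->', prefix + 'TracLinks')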
Example #7
    def expand_macro(self, formatter, name, content):
        args, kw = parse_args(content)
        prefix = args[0].strip() if args else None
        hideprefix = args and len(args) > 1 and args[1].strip() == 'hideprefix'
        minsize = _arg_as_int(kw.get('min', 1), 'min', min=1)
        minsize_group = max(minsize, 2)
        depth = _arg_as_int(kw.get('depth', -1), 'depth', min=-1)
        format = kw.get('format', '')

        def parse_list(name):
            return [
                inc.strip() for inc in kw.get(name, '').split(':')
                if inc.strip()
            ]

        includes = parse_list('include') or ['*']
        excludes = parse_list('exclude')

        wiki = formatter.wiki
        resource = formatter.resource
        if prefix and resource and resource.realm == 'wiki':
            prefix = wiki.resolve_relative_name(prefix, resource.id)

        start = prefix.count('/') if prefix else 0

        if hideprefix:
            omitprefix = lambda page: page[len(prefix):]
        else:
            omitprefix = lambda page: page

        pages = sorted(
            page for page in wiki.get_pages(prefix)
            if (depth < 0 or depth >= page.count('/') - start)
            and 'WIKI_VIEW' in formatter.perm('wiki', page)
            and any(fnmatchcase(page, inc) for inc in includes)
            and not any(fnmatchcase(page, exc) for exc in excludes))

        if format == 'compact':
            return tag(
                separated((tag.a(wiki.format_page_name(omitprefix(p)),
                                 href=formatter.href.wiki(p))
                           for p in pages), ', '))

        # the function definitions for the different format styles

        # the different page split formats, each corresponding to its rendering
        def split_pages_group(pages):
            """Return a list of (path elements, page_name) pairs,
            where path elements correspond to the page name (without prefix)
            split at CamelCase word boundaries, numbers and '/'.
            """
            page_paths = []
            for page in pages:
                path = [
                    elt.strip() for elt in self.SPLIT_RE.split(
                        self.NUM_SPLIT_RE.sub(
                            r" \1 ",
                            wiki.format_page_name(omitprefix(page),
                                                  split=True)))
                ]
                page_paths.append(([elt for elt in path if elt], page))
            return page_paths

        def split_pages_hierarchy(pages):
            """Return a list of (path elements, page_name) pairs,
            where path elements correspond to the page name (without prefix)
            split according to the '/' hierarchy.
            """
            return [(wiki.format_page_name(omitprefix(page)).split("/"), page)
                    for page in pages]

        # the different tree structures, each corresponding to its rendering
        def tree_group(entries):
            """Transform a flat list of entries into a tree structure.

            `entries` is a list of `(path_elements, page_name)` pairs

            Return a list organized in a tree structure, in which:
              - a leaf is a page name
              - a node is a `(key, nodes)` pair, where:
                - `key` is the leftmost of the path elements, common to the
                  grouped (path element, page_name) entries
                - `nodes` is a list of nodes or leaves
            """
            def keyfn(args):
                elts, name = args
                return elts[0] if elts else ''

            groups = []

            for key, grouper in groupby(entries, keyfn):
                # remove key from path_elements in grouped entries for further
                # grouping
                grouped_entries = [(path_elements[1:], page_name)
                                   for path_elements, page_name in grouper]

                if key and len(grouped_entries) >= minsize_group:
                    subnodes = tree_group(sorted(grouped_entries))
                    if len(subnodes) == 1:
                        subkey, subnodes = subnodes[0]
                        node = (key + subkey, subnodes)
                        groups.append(node)
                    elif self.SPLIT_RE.match(key):
                        for elt in subnodes:
                            if isinstance(elt, tuple):
                                subkey, subnodes = elt
                                elt = (key + subkey, subnodes)
                            groups.append(elt)
                    else:
                        node = (key, subnodes)
                        groups.append(node)
                else:
                    for path_elements, page_name in grouped_entries:
                        groups.append(page_name)
            return groups

        def tree_hierarchy(entries):
            """Transform a flat list of entries into a tree structure.

            `entries` is a list of `(path_elements, page_name)` pairs

            Return a list organized in a tree structure, in which:
              - a leaf is a `(rest, page)` pair, where:
                - `rest` is the rest of the path to be shown
                - `page` is a page name
              - a node is a `(key, page, nodes)` pair, where:
                - `key` is the leftmost of the path elements, common to the
                  grouped (path element, page_name) entries
                - `page` is a page name (if one exists for that node)
                - `nodes` is a list of nodes or leaves
            """
            def keyfn(args):
                elts, name = args
                return elts[0] if elts else ''

            groups = []

            for key, grouper in groupby(entries, keyfn):
                grouped_entries = [e for e in grouper]
                sub_entries = [e for e in grouped_entries if len(e[0]) > 1]
                key_entries = [e for e in grouped_entries if len(e[0]) == 1]
                key_entry = key_entries[0] if key_entries else None
                key_page = key_entry[1] if key_entries else None

                if key and len(sub_entries) >= minsize:
                    # remove key from path_elements in grouped entries for
                    # further grouping
                    sub_entries = [(path_elements[1:], page)
                                   for path_elements, page in sub_entries]

                    subnodes = tree_hierarchy(sorted(sub_entries))
                    node = (key, key_page, subnodes)
                    groups.append(node)
                else:
                    if key_entry:
                        groups.append(key_entry)
                    groups.extend(sub_entries)
            return groups

        # the different rendering formats
        def render_group(group):
            return tag.ul(
                tag.li(tag(tag.strong(elt[0].strip('/')),
                           render_group(elt[1]))
                       if isinstance(elt, tuple)
                       else tag.a(wiki.format_page_name(omitprefix(elt)),
                                  href=formatter.href.wiki(elt)))
                for elt in group)

        def render_hierarchy(group):
            return tag.ul(
                tag.li(tag(tag.a(elt[0], href=formatter.href.wiki(elt[1]))
                           if elt[1] else tag(elt[0]),
                           render_hierarchy(elt[2]))
                       if len(elt) == 3
                       else tag.a('/'.join(elt[0]),
                                  href=formatter.href.wiki(elt[1])))
                for elt in group)

        transform = {
            'group': lambda p:
                render_group(tree_group(split_pages_group(p))),
            'hierarchy': lambda p:
                render_hierarchy(tree_hierarchy(split_pages_hierarchy(p))),
        }.get(format)

        if transform:
            titleindex = transform(pages)
        else:
            titleindex = tag.ul(
                tag.li(
                    tag.a(wiki.format_page_name(omitprefix(page)),
                          href=formatter.href.wiki(page))) for page in pages)

        return tag.div(titleindex, class_='titleindex')
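
tree_group relies on itertools.groupby over already sorted (path elements,
page name) pairs: each distinct leading element either becomes a (key,
subnodes) node, when enough entries share it, or collapses back into plain
page-name leaves. A stripped-down sketch of that grouping, without the
key-merging refinements; the sample page names and their pre-split paths are
invented.

from itertools import groupby

def tree(entries, minsize=2):
    # entries: sorted list of (path elements, page name) pairs
    groups = []
    for key, grouper in groupby(entries, lambda e: e[0][0] if e[0] else ''):
        grouped = [(elts[1:], page) for elts, page in grouper]
        if key and len(grouped) >= minsize:
            groups.append((key, tree(sorted(grouped), minsize)))
        else:
            groups.extend(page for _elts, page in grouped)
    return groups

entries = sorted([(['Trac', 'Guide'], 'TracGuide'),
                  (['Trac', 'Links'], 'TracLinks'),
                  (['Wiki', 'Start'], 'WikiStart')])
print(tree(entries))   # [('Trac', ['TracGuide', 'TracLinks']), 'WikiStart']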
Example #8
 def _render_externals(self, prop):
     if not self._externals_map:
         for dummykey, value in self.svn_externals_section.options():
             value = value.split()
             if len(value) != 2:
                 self.log.warning(
                     "svn:externals entry %s doesn't contain "
                     "a space-separated key value pair, "
                     "skipping.", dummykey)
                 continue
             key, value = value
             self._externals_map[key] = value.replace('%', '%%') \
                                        .replace('$path', '%(path)s') \
                                        .replace('$rev', '%(rev)s')
     externals = []
     for external in prop.splitlines():
         elements = external.split()
         if not elements:
             continue
         localpath, rev, url = elements[0], '', elements[-1]
         if localpath.startswith('#'):
             externals.append((external, None, None, None, None))
             continue
         if len(elements) == 3:
             rev = elements[1]
             rev = rev.replace('-r', '')
         # retrieve a matching entry in the externals map
         if not self._is_abs_url(url):
             externals.append((external, None, None, None, None))
             continue
         prefix = []
         base_url = url
         while base_url:
             if base_url in self._externals_map or base_url == u'/':
                 break
             base_url, pref = posixpath.split(base_url)
             prefix.append(pref)
         href = self._externals_map.get(base_url)
         revstr = ' at revision ' + rev if rev else ''
         if not href and (url.startswith('http://')
                          or url.startswith('https://')):
             href = url.replace('%', '%%')
         if href:
             remotepath = ''
             if prefix:
                 remotepath = posixpath.join(*reversed(prefix))
             externals.append(
                 (localpath, revstr, base_url, remotepath, href % {
                     'path': remotepath,
                     'rev': rev
                 }))
         else:
             externals.append((localpath, revstr, url, None, None))
     externals_data = []
     for localpath, rev, url, remotepath, href in externals:
         label = localpath
         if url is None:
             title = ''
         elif href:
             if url:
                 url = ' in ' + url
             label += rev + url
             title = ''.join((remotepath, rev, url))
         else:
             title = _('No svn:externals configured in trac.ini')
         externals_data.append((label, href, title))
     return tag.ul([
         tag.li(tag.a(label, href=href, title=title))
         for label, href, title in externals_data
     ])
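
Each svn:externals property line is split into a local path, an optional
revision ('-rNNN'), and a URL; the URL is then looked up in a map built from
the [svn:externals] section of trac.ini to produce a browseable link. The
line-splitting part in isolation, on made-up property text (comment lines are
simply skipped here, whereas the real renderer passes them through verbatim):

prop = """\
vendor/lib   https://svn.example.org/vendor/lib
tools -r1200 https://svn.example.org/tools
# comment line
"""

for external in prop.splitlines():
    elements = external.split()
    if not elements or elements[0].startswith('#'):
        continue
    localpath, rev, url = elements[0], '', elements[-1]
    if len(elements) == 3:
        rev = elements[1].replace('-r', '')
    print((localpath, rev or None, url))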
Example #9
    def render_property_diff(self, name, old_context, old_props, new_context,
                             new_props, options):
        # Build a 5-column table showing modifications on merge sources:
        # || source || added || removed || added (ni) || removed (ni) ||
        # || source || removed                                        ||
        rm = RepositoryManager(self.env)
        repos = rm.get_repository(old_context.resource.parent.id)

        def parse_sources(props):
            sources = {}
            value = props[name]
            lines = value.splitlines() if name == 'svn:mergeinfo' \
                                       else value.split()
            for line in lines:
                path, revs = line.split(':', 1)
                spath = _path_within_scope(repos.scope, path)
                if spath is not None:
                    inheritable, non_inheritable = _partition_inheritable(revs)
                    sources[spath] = (set(Ranges(inheritable)),
                                      set(Ranges(non_inheritable)))
            return sources

        old_sources = parse_sources(old_props)
        new_sources = parse_sources(new_props)
        # Go through new sources, detect modified ones or added ones
        blocked = name.endswith('blocked')
        added_label = [_("merged: "), _("blocked: ")][blocked]
        removed_label = [_("reverse-merged: "), _("un-blocked: ")][blocked]
        added_ni_label = _("marked as non-inheritable: ")
        removed_ni_label = _("unmarked as non-inheritable: ")

        sources = []
        changed_revs = {}
        changed_nodes = []
        for spath, (new_revs, new_revs_ni) in new_sources.iteritems():
            new_spath = spath not in old_sources
            if new_spath:
                old_revs = old_revs_ni = set()
            else:
                old_revs, old_revs_ni = old_sources.pop(spath)
            added = new_revs - old_revs
            removed = old_revs - new_revs
            # skip sources whose merged revisions did not change
            if not added and not removed:
                continue
            added_ni = new_revs_ni - old_revs_ni
            removed_ni = old_revs_ni - new_revs_ni
            revs = sorted(added | removed | added_ni | removed_ni)
            try:
                node = repos.get_node(spath, revs[-1])
                changed_nodes.append((node, revs[0]))
            except NoSuchNode:
                pass
            sources.append(
                (spath, new_spath, added, removed, added_ni, removed_ni))
        if changed_nodes:
            changed_revs = repos._get_changed_revs(changed_nodes)

        def revs_link(revs, context):
            if revs:
                revs = to_ranges(revs)
                return _get_revs_link(revs.replace(',', u',\u200b'), context,
                                      spath, revs)

        modified_sources = []
        for spath, new_spath, added, removed, added_ni, removed_ni in sources:
            if spath in changed_revs:
                revs = set(changed_revs[spath])
                added &= revs
                removed &= revs
                added_ni &= revs
                removed_ni &= revs
            if added or removed:
                if new_spath:
                    status = _(" (added)")
                else:
                    status = None
                modified_sources.append(
                    (spath,
                     [_get_source_link(spath, new_context), status],
                     added and tag(added_label,
                                   revs_link(added, new_context)),
                     removed and tag(removed_label,
                                     revs_link(removed, old_context)),
                     added_ni and tag(added_ni_label,
                                      revs_link(added_ni, new_context)),
                     removed_ni and tag(removed_ni_label,
                                        revs_link(removed_ni, old_context))))
        # Go through remaining old sources, those were deleted
        removed_sources = []
        for spath, old_revs in old_sources.iteritems():
            removed_sources.append(
                (spath, _get_source_link(spath, old_context)))
        if modified_sources or removed_sources:
            modified_sources.sort()
            removed_sources.sort()
            changes = tag.table(
                tag.tbody(
                    [tag.tr(tag.td(c) for c in cols[1:])
                     for cols in modified_sources],
                    [tag.tr(tag.td(src), tag.td(_('removed'), colspan=4))
                     for spath, src in removed_sources]),
                class_='props')
        else:
            changes = tag.em(_(' (with no actual effect on merging)'))
        return tag.li(tag_('Property %(prop)s changed', prop=tag.strong(name)),
                      changes)
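
The svn:mergeinfo comparison boils down to set arithmetic on revision ranges:
expand the old and the new range lists, then the difference in one direction
is reported as merged and in the other as reverse-merged. A bare-bones sketch
with a tiny stand-in for Trac's Ranges helper; the revision numbers are
invented.

def expand(spec):
    # '1-3,7' -> {1, 2, 3, 7}
    revs = set()
    for part in spec.split(','):
        lo, _, hi = part.partition('-')
        revs.update(range(int(lo), int(hi or lo) + 1))
    return revs

old = expand('1-3')
new = expand('1-5,7')
print('merged:', sorted(new - old))          # [4, 5, 7]
print('reverse-merged:', sorted(old - new))  # []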
Example #10
                        spamstatus = count < 0
                        self._record_action(status, type,
                                            "ok" if spamstatus == rejected
                                            else "error", strategy, 0)
                    else:
                        self._record_action(status, type, '', strategy, 0)
                self._record_action(status, type, '', '', 0)

            LogEntry.purge(self.env, self.purge_age)

        if score < self.min_karma:
            self.log.debug('Rejecting submission %r by "%s" (%r) because it '
                           'earned only %d karma points (%d are required).',
                           abbrev, author, req.remote_addr, score, self.min_karma)
            rejects = []
            outreasons.sort()
            for r in outreasons:
                rejects.append(tag.li(r))
            msg = tag.div(tag.ul(rejects), class_='message')

            self.reject_handler.reject_content(req, msg)

    def train(self, req, ids, spam=True, delete=False):
        environ = {}
        for name, value in req.environ.items():
            if not name.startswith('HTTP_'):
                environ[name] = value

        if not isinstance(ids, list):
            ids = [ids]
        for log_id in ids:
            start = time.time()
            entry = LogEntry.fetch(self.env, log_id)
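
All the fragments above lean on the same element-builder idiom:
tag.<name>(*children, **attrs) creates an element, calling an existing element
appends further children or attributes, and a None child is silently dropped,
which is why expressions like "added and tag(...)" are safe arguments. A
minimal sketch, assuming Genshi's genshi.builder (newer Trac versions ship a
compatible builder as trac.util.html.tag); the reason strings are invented.

from genshi.builder import tag

reasons = ['too many external links', 'blacklisted IP']
msg = tag.div(tag.ul(tag.li(r) for r in reasons), class_='message')

# Calling the element adds to it; None children are ignored, so conditional
# children need no special casing.
msg = msg(tag.p('Submission rejected') if reasons else None)

print(msg.generate().render('html', encoding=None))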