示例#1
0
 def test_with_limit(self):
     """An explicit limit is returned as-is, but capped at the 500 max."""
     for limit, page, expected in [(10, 0, (10, 0, 0)),
                                   (10, 2, (10, 2, 20))]:
         self.assertEqual(g.handle_paging(limit, page), expected)
     # paging with an explicit limit must leave user prefs untouched
     self.assertEqual(c.user.get_pref('results_per_page'), None)
     # an absurdly large limit is clamped to the enforced maximum
     self.assertEqual(g.handle_paging(99999999, 0), (500, 0, 0))
示例#2
0
 def test_with_limit(self):
     """An explicit limit passes through handle_paging unchanged (up to
     the 500 cap) and must not alter the user's results_per_page pref.

     Return shape is (limit, page, start) where start = page * limit.
     """
     self.assertEqual(g.handle_paging(10, 0), (10, 0, 0))
     self.assertEqual(g.handle_paging(10, 2), (10, 2, 20))
     # handle paging must not mess up user preferences
     self.assertEqual(c.user.get_pref('results_per_page'), None)
     # maximum enforced
     self.assertEqual(g.handle_paging(99999999, 0), (500, 0, 0))
示例#3
0
    def test_without_limit(self):
        """No explicit limit: fall back to the default (25) or, once set,
        to the user's results_per_page preference."""
        # no preference yet -> default page size of 25 applies
        for page, expected in [(0, (25, 0, 0)), (2, (25, 2, 50))]:
            self.assertEqual(g.handle_paging(None, page), expected)
        # handle_paging must not create a preference as a side effect
        self.assertIsNone(c.user.get_pref('results_per_page'))

        # a stored preference overrides the default page size
        c.user.set_pref('results_per_page', 100)
        for page, expected in [(0, (100, 0, 0)), (2, (100, 2, 200))]:
            self.assertEqual(g.handle_paging(None, page), expected)
        # and the stored preference itself is left untouched
        self.assertEqual(c.user.get_pref('results_per_page'), 100)
示例#4
0
    def test_without_limit(self):
        """With limit=None, handle_paging uses the default of 25, or the
        user's results_per_page preference when one is set."""
        # default limit = 25
        self.assertEqual(g.handle_paging(None, 0), (25, 0, 0))
        self.assertEqual(g.handle_paging(None, 2), (25, 2, 50))
        # handle paging must not mess up user preferences
        self.assertEqual(c.user.get_pref('results_per_page'), None)

        # user has page size preference
        c.user.set_pref('results_per_page', 100)
        self.assertEqual(g.handle_paging(None, 0), (100, 0, 0))
        self.assertEqual(g.handle_paging(None, 2), (100, 2, 200))
        # handle paging must not mess up user preferences
        self.assertEqual(c.user.get_pref('results_per_page'), 100)
示例#5
0
    def test_without_limit_with_default(self):
        """A caller-supplied default beats the global 25, but a stored user
        preference still beats the caller-supplied default."""
        # no preference: the explicit default (30) is used
        for page, expected in [(0, (30, 0, 0)), (2, (30, 2, 60))]:
            self.assertEqual(g.handle_paging(None, page, 30), expected)
        # no preference may be created as a side effect
        self.assertIsNone(c.user.get_pref('results_per_page'))

        # with a preference set, it wins over the passed-in default
        c.user.set_pref('results_per_page', 25)
        for page, expected in [(0, (25, 0, 0)), (2, (25, 2, 50))]:
            self.assertEqual(g.handle_paging(None, page, 30), expected)
        # and the preference value itself is preserved
        self.assertEqual(c.user.get_pref('results_per_page'), 25)
示例#6
0
    def test_without_limit_with_default(self):
        """A caller-supplied default overrides the global 25, but a user's
        results_per_page preference still takes precedence over it."""
        # default limit is not used when explicitly provided
        self.assertEqual(g.handle_paging(None, 0, 30), (30, 0, 0))
        self.assertEqual(g.handle_paging(None, 2, 30), (30, 2, 60))
        # handle paging must not mess up user preferences
        self.assertEqual(c.user.get_pref('results_per_page'), None)

        # user has page size preference, which is not affected by default
        c.user.set_pref('results_per_page', 25)
        self.assertEqual(g.handle_paging(None, 0, 30), (25, 0, 0))
        self.assertEqual(g.handle_paging(None, 2, 30), (25, 2, 50))
        # handle paging must not mess up user preferences
        self.assertEqual(c.user.get_pref('results_per_page'), 25)
示例#7
0
 def index(self, page=0, limit=DEFAULT_PAGE_LIMIT, **kw):
     """Render a single commit: its context plus a paged, sorted list of
     changed files.

     Returns a dict with the commit, paging info (page/limit/count) and
     'artifacts': (change_type, file, 'blob'|'tree', is_text) tuples
     sorted by path.
     """
     c.revision_widget = self.revision_widget
     c.page_list = self.page_list
     result = dict(commit=self._commit)
     if self._commit:
         result.update(self._commit.context())
     tree = self._commit.tree
     limit, page, start = g.handle_paging(limit, page,
                                          default=self.DEFAULT_PAGE_LIMIT)
     diffs = self._commit.paged_diffs(start=start, end=start + limit, onlyChangedFiles=True)
     result['artifacts'] = []
     for t in ('added', 'removed', 'changed', 'copied', 'renamed'):
         for f in diffs[t]:
             # copied/renamed entries are dicts carrying 'new' (and 'old') paths
             if t in ('copied', 'renamed'):
                 filepath = f['new']
             else:
                 filepath = f
             # look the blob up once instead of twice per file
             blob = filepath and tree.get_blob_by_path(filepath)
             is_text = blob and blob.has_html_view
             # NOTE(review): the blob/tree flag is computed from ``f``, which is
             # a dict for copied/renamed entries — confirm get_blob_by_path's
             # behavior for that case before changing it to ``filepath``
             result['artifacts'].append(
                 (t, f, 'blob' if tree.get_blob_by_path(f) else 'tree', is_text)
             )
     count = diffs['total']
     result.update(dict(page=page, limit=limit, count=count))
     # Sort result['artifacts'], which looks like:
     # [('added', u'aaa.txt', 'blob', True),
     #  ('removed', u'bbb.txt', 'tree', None),
     #  ('changed', u'ccc.txt', 'blob', True), ...]
     # copied/renamed entries carry a dict; those sort by their old path.
     # isinstance() instead of type() == dict for idiomatic type checking.
     result['artifacts'].sort(
         key=lambda x: x[1]['old'] if isinstance(x[1], dict) else x[1])
     return result
示例#8
0
    def adminlist(self, sort='alpha', limit=25, page=0, **kw):
        """Paged list of (project, admin user) pairs in this neighborhood."""
        limit, page, start = g.handle_paging(limit, page)

        query = M.Project.query.find(
            dict(neighborhood_id=self.neighborhood._id, deleted=False))
        if sort == 'alpha':
            query.sort('name')
        else:
            query.sort('last_updated', pymongo.DESCENDING)
        count = query.count()
        projects = query.skip(start).limit(int(limit)).all()

        entries = []
        for project in projects:
            # find the project's Admin role, then every user that holds it
            admin_role = M.ProjectRole.query.get(
                project_id=project.root_project._id, name='Admin')
            if admin_role is None:
                continue
            user_roles = M.ProjectRole.query.find(
                dict(project_id=project.root_project._id, name=None)).all()
            entries.extend(
                {'project': project, 'user': role.user}
                for role in user_roles
                if role.user is not None and admin_role._id in role.roles)

        set_nav(self.neighborhood)
        return dict(
            entries=entries,
            sort=sort,
            limit=limit,
            page=page,
            count=count,
            page_list=W.page_list,
            neighborhood=self.neighborhood,
        )
示例#9
0
文件: discuss.py 项目: apache/allura
 def index(self, **kw):
     """Moderation listing of posts in this discussion.

     Filter params (validated by the post_filter widget): page, limit,
     status ('-' means any status), username (author filter), flag
     (minimum flag count).
     """
     kw = WidgetConfig.post_filter.validate(kw, None)
     page = kw.pop('page', 0)
     limit = kw.pop('limit', 50)
     status = kw.pop('status', 'pending')
     username = kw.pop('username', None)
     flag = kw.pop('flag', None)
     c.post_filter = WidgetConfig.post_filter
     c.moderate_posts = WidgetConfig.moderate_posts
     c.page_list = WidgetConfig.page_list
     query = dict(
         discussion_id=self.discussion._id,
         deleted=False)
     if status != '-':
         # '-' means "any status"; otherwise restrict to the requested one
         query['status'] = status
     if flag:
         query['flags'] = {'$gte': int(flag)}
     if username:
         # an unknown username yields author_id None, matching no real author
         filtered_user = User.by_username(username)
         query['author_id'] = filtered_user._id if filtered_user else None
     q = self.PostModel.query.find(query).sort('timestamp', -1)
     count = q.count()
     limit, page, start = g.handle_paging(limit, page or 0, default=50)
     q = q.skip(start)
     q = q.limit(limit)
     # NOTE(review): pgnum divides the page *number* by limit, which looks
     # like it expects an offset — confirm against the template's usage
     pgnum = (page // limit) + 1
     pages = (count // limit) + 1
     return dict(discussion=self.discussion,
                 posts=q, page=page, limit=limit,
                 status=status, flag=flag, username=username,
                 pgnum=pgnum, pages=pages, count=count)
示例#10
0
文件: root.py 项目: xmonader/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON view of a forum: its metadata plus a paged topic list."""
     limit, page, start = g.handle_paging(limit, int(page))
     topics = model.Forum.thread_class().query.find(
         dict(discussion_id=self.forum._id))
     # flagged threads first, then most recent activity
     topics = topics.sort([('flags', pymongo.DESCENDING),
                           ('last_post_date', pymongo.DESCENDING)])
     topics = topics.skip(start).limit(limit)
     # NOTE(review): count() is read off the cursor after skip/limit —
     # presumably it still reports the full match count; verify
     count = topics.count()
     json = {}
     json['forum'] = self.forum.__json__(
         limit=1
     )  # small limit since we're going to "del" the threads anyway
     # topics replace threads here
     del json['forum']['threads']
     # only topics with status 'ok' are exposed over REST
     json['forum']['topics'] = [
         dict(_id=t._id,
              subject=t.subject,
              num_replies=t.num_replies,
              num_views=t.num_views,
              url=h.absurl('/rest' + t.url()),
              last_post=t.last_post) for t in topics if t.status == 'ok'
     ]
     json['count'] = count
     json['page'] = page
     json['limit'] = limit
     return json
示例#11
0
文件: discuss.py 项目: 99Kies/allura
 def index(self, **kw):
     """Moderation listing of posts in this discussion.

     Filter params (validated by the post_filter widget): page, limit,
     status ('-' means any status), username (author filter), flag
     (minimum flag count).
     """
     kw = WidgetConfig.post_filter.validate(kw, None)
     page = kw.pop('page', 0)
     limit = kw.pop('limit', 50)
     status = kw.pop('status', 'pending')
     username = kw.pop('username', None)
     flag = kw.pop('flag', None)
     c.post_filter = WidgetConfig.post_filter
     c.moderate_posts = WidgetConfig.moderate_posts
     c.page_list = WidgetConfig.page_list
     query = dict(discussion_id=self.discussion._id, deleted=False)
     if status != '-':
         # '-' means "any status"; otherwise restrict to the requested one
         query['status'] = status
     if flag:
         query['flags'] = {'$gte': int(flag)}
     if username:
         # an unknown username yields author_id None, matching no real author
         filtered_user = User.by_username(username)
         query['author_id'] = filtered_user._id if filtered_user else None
     q = self.PostModel.query.find(query).sort('timestamp', -1)
     count = q.count()
     limit, page, start = g.handle_paging(limit, page or 0, default=50)
     q = q.skip(start)
     q = q.limit(limit)
     # NOTE(review): pgnum divides the page *number* by limit, which looks
     # like it expects an offset — confirm against the template's usage
     pgnum = (page // limit) + 1
     pages = (count // limit) + 1
     return dict(discussion=self.discussion,
                 posts=q,
                 page=page,
                 limit=limit,
                 status=status,
                 flag=flag,
                 username=username,
                 pgnum=pgnum,
                 pages=pages,
                 count=count)
示例#12
0
文件: project.py 项目: apache/allura
    def adminlist(self, sort='alpha', limit=25, page=0, **kw):
        """Paged list of (project, admin user) pairs for this neighborhood."""
        limit, page, start = g.handle_paging(limit, page)

        pq = M.Project.query.find(
            dict(neighborhood_id=self.neighborhood._id, deleted=False))
        if sort == 'alpha':
            pq.sort('name')
        else:
            pq.sort('last_updated', pymongo.DESCENDING)
        count = pq.count()
        projects = pq.skip(start).limit(int(limit)).all()

        entries = []
        for proj in projects:
            # find the project's Admin role, then every user holding it
            admin_role = M.ProjectRole.query.get(
                project_id=proj.root_project._id, name='Admin')
            if admin_role is None:
                continue
            # name=None roles are presumably the per-user role records — verify
            user_role_list = M.ProjectRole.query.find(
                dict(project_id=proj.root_project._id, name=None)).all()
            for ur in user_role_list:
                if ur.user is not None and admin_role._id in ur.roles:
                    entries.append({'project': proj, 'user': ur.user})

        set_nav(self.neighborhood)
        return dict(entries=entries,
                    sort=sort,
                    limit=limit, page=page, count=count,
                    page_list=W.page_list,
                    neighborhood=self.neighborhood,
                    )
示例#13
0
    def _search(self,
                model,
                fields,
                add_fields,
                q=None,
                f=None,
                page=0,
                limit=None,
                **kw):
        """Site-admin search: query solr for *model* artifacts, join the
        hits back to their mongo objects, and normalize field names.

        :param model: artifact class being searched (supplies ``type_s``)
        :param fields: [(solr_field, label), ...] columns to display
        :param add_fields: extra field names to include in the form
        :param q: solr query string; falsy means "no search performed"
        :param f: passed through to solr search (presumably the field to
            search in — confirm against site_admin_search)
        """
        all_fields = fields + [(fld, fld) for fld in add_fields]
        c.search_form = W.admin_search_form(all_fields)
        c.page_list = W.page_list
        c.page_size = W.page_size
        count = 0
        objects = []
        limit, page, start = g.handle_paging(limit, page, default=25)
        if q:
            match = search.site_admin_search(model,
                                             q,
                                             f,
                                             rows=limit,
                                             start=start)
            if match:
                count = match.hits
                objects = match.docs

                ids = [obj['id'] for obj in objects]
                mongo_objects = search.mapped_artifacts_from_index_ids(
                    ids, model)
                for i in range(len(objects)):
                    obj = objects[i]
                    # solr ids contain '#'; the part after it keys mongo_objects
                    _id = obj['id'].split('#')[1]
                    obj['object'] = mongo_objects.get(_id)
                # Some objects can be deleted, but still have index in solr, should skip those
                objects = [o for o in objects if o.get('object')]

        def convert_fields(obj):
            # throw the type away (e.g. '_s' from 'url_s')
            result = {}
            for k, val in six.iteritems(obj):
                name = k.rsplit('_', 1)
                if len(name) == 2:
                    name = name[0]
                else:
                    name = k
                result[name] = val
            return result

        return {
            'q': q,
            'f': f,
            'objects': list(map(convert_fields, objects)),
            'count': count,
            'page': page,
            'limit': limit,
            'fields': fields,
            'additional_fields': add_fields,
            'type_s': model.type_s,
        }
示例#14
0
文件: root.py 项目: apache/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON view of a topic with paging info."""
     limit, page, start = g.handle_paging(limit, int(page))
     return {
         'topic': self.topic.__json__(limit=limit, page=page),
         'count': self.topic.query_posts(status='ok').count(),
         'page': page,
         'limit': limit,
     }
示例#15
0
文件: root.py 项目: xmonader/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON view of a topic with paging info."""
     limit, page, start = g.handle_paging(limit, int(page))
     json_data = {}
     json_data['topic'] = self.topic.__json__(limit=limit, page=page)
     # count only posts with status 'ok'
     json_data['count'] = self.topic.query_posts(status='ok').count()
     json_data['page'] = page
     json_data['limit'] = limit
     return json_data
示例#16
0
文件: project.py 项目: apache/allura
    def index(self, sort='alpha', limit=25, page=0, **kw):
        """Neighborhood home page: intro text plus a paged project listing.

        Depending on configuration this may instead redirect to the
        neighborhood's redirect URL or to the project's first mounted tool.
        """
        text = None
        if self.neighborhood.use_wiki_page_as_root:
            default_wiki_page = get_default_wiki_page()
            if default_wiki_page:
                text = default_wiki_page.html_text
        elif self.neighborhood.redirect:
            redirect(self.neighborhood.redirect)
        elif not self.neighborhood.has_home_tool:
            mount = c.project.ordered_mounts()[0]
            if mount is not None:
                if 'ac' in mount:
                    redirect(mount['ac'].options.mount_point + '/')
                elif 'sub' in mount:
                    redirect(mount['sub'].url())
            else:
                redirect(c.project.app_configs[0].options.mount_point + '/')
        else:
            # BUG FIX: a stray trailing comma previously made ``text`` a
            # one-element tuple instead of the rendered markdown string
            text = g.markdown.cached_convert(self.neighborhood, 'homepage')

        c.project_summary = W.project_summary
        c.page_list = W.page_list
        limit, page, start = g.handle_paging(limit, page)
        pq = M.Project.query.find(dict(
            neighborhood_id=self.neighborhood._id,
            deleted=False,
            is_nbhd_project=False,
        ))
        if sort == 'alpha':
            pq.sort('name')
        else:
            pq.sort('last_updated', pymongo.DESCENDING)
        count = pq.count()
        nb_max_projects = self.neighborhood.get_max_projects()
        projects = pq.skip(start).limit(int(limit)).all()
        categories = M.ProjectCategory.query.find(
            {'parent_id': None}).sort('name').all()
        c.custom_sidebar_menu = []
        # offer "Add a Project" only when the user may register one and the
        # neighborhood's project quota (if any) is not exhausted
        if h.has_access(self.neighborhood, 'register')() and (nb_max_projects is None or count < nb_max_projects):
            c.custom_sidebar_menu += [
                SitemapEntry('Add a Project', self.neighborhood.url()
                             + 'add_project', ui_icon=g.icons['add']),
                SitemapEntry('')
            ]
        c.custom_sidebar_menu = c.custom_sidebar_menu + [
            SitemapEntry(cat.label, self.neighborhood.url() + 'browse/' + cat.name) for cat in categories
        ]
        return dict(neighborhood=self.neighborhood,
                    title="Welcome to " + self.neighborhood.name,
                    text=text,
                    projects=projects,
                    sort=sort,
                    limit=limit, page=page, count=count)
示例#17
0
    def index(self, sort='alpha', limit=25, page=0, **kw):
        """Neighborhood home page: intro text plus a paged project listing.

        Depending on configuration this may instead redirect to the
        neighborhood's redirect URL or to the project's first mounted tool.
        """
        text = None
        if self.neighborhood.use_wiki_page_as_root:
            default_wiki_page = get_default_wiki_page()
            if default_wiki_page:
                text = default_wiki_page.html_text
        elif self.neighborhood.redirect:
            redirect(self.neighborhood.redirect)
        elif not self.neighborhood.has_home_tool:
            mount = c.project.ordered_mounts()[0]
            if mount is not None:
                if 'ac' in mount:
                    redirect(mount['ac'].options.mount_point + '/')
                elif 'sub' in mount:
                    redirect(mount['sub'].url())
            else:
                redirect(c.project.app_configs[0].options.mount_point + '/')
        else:
            # BUG FIX: a stray trailing comma previously made ``text`` a
            # one-element tuple instead of the rendered markdown string
            text = g.markdown.cached_convert(self.neighborhood, 'homepage')

        c.project_summary = W.project_summary
        c.page_list = W.page_list
        limit, page, start = g.handle_paging(limit, page)
        pq = M.Project.query.find(dict(
            neighborhood_id=self.neighborhood._id,
            deleted=False,
            is_nbhd_project=False,
        ))
        if sort == 'alpha':
            pq.sort('name')
        else:
            pq.sort('last_updated', pymongo.DESCENDING)
        count = pq.count()
        nb_max_projects = self.neighborhood.get_max_projects()
        projects = pq.skip(start).limit(int(limit)).all()
        categories = M.ProjectCategory.query.find(
            {'parent_id': None}).sort('name').all()
        c.custom_sidebar_menu = []
        # offer "Add a Project" only when the user may register one and the
        # neighborhood's project quota (if any) is not exhausted
        if h.has_access(self.neighborhood, 'register')() and (nb_max_projects is None or count < nb_max_projects):
            c.custom_sidebar_menu += [
                SitemapEntry('Add a Project', self.neighborhood.url()
                             + 'add_project', ui_icon=g.icons['add']),
                SitemapEntry('')
            ]
        c.custom_sidebar_menu = c.custom_sidebar_menu + [
            SitemapEntry(cat.label, self.neighborhood.url() + 'browse/' + cat.name) for cat in categories
        ]
        return dict(neighborhood=self.neighborhood,
                    title="Welcome to " + self.neighborhood.name,
                    text=text,
                    projects=projects,
                    sort=sort,
                    limit=limit, page=page, count=count)
示例#18
0
文件: main.py 项目: apache/allura
 def index(self, page=0, limit=None, **kw):
     """Paged blog listing, newest posts first.

     Draft (unpublished) posts are hidden from users without 'write'
     access.
     """
     criteria = dict(app_config_id=c.app.config._id)
     if not has_access(c.app, 'write')():
         criteria['state'] = 'published'
     posts_query = BM.BlogPost.query.find(criteria)
     post_count = posts_query.count()
     limit, page, _ = g.handle_paging(limit, page)
     limit, page = h.paging_sanitizer(limit, page, post_count)
     posts = (posts_query
              .sort('timestamp', pymongo.DESCENDING)
              .skip(page * limit)
              .limit(limit))
     c.form = W.preview_post_form
     c.pager = W.pager
     return dict(posts=posts, page=page, limit=limit, count=post_count)
示例#19
0
文件: main.py 项目: 99Kies/allura
 def index(self, page=0, limit=None, **kw):
     """Paged blog listing, newest first; drafts only for writers."""
     query_filter = dict(app_config_id=c.app.config._id)
     if not has_access(c.app, 'write')():
         # readers only see published posts
         query_filter['state'] = 'published'
     q = BM.BlogPost.query.find(query_filter)
     post_count = q.count()
     limit, page, _ = g.handle_paging(limit, page)
     # clamp page/limit against the actual number of posts
     limit, page = h.paging_sanitizer(limit, page, post_count)
     posts = q.sort('timestamp', pymongo.DESCENDING) \
              .skip(page * limit).limit(limit)
     c.form = W.preview_post_form
     c.pager = W.pager
     return dict(posts=posts, page=page, limit=limit, count=post_count)
示例#20
0
 def index(self, sort='alpha', limit=25, page=0, **kw):
     """Paged browse listing of this neighborhood's projects."""
     c.project_summary = W.project_summary
     c.page_list = W.page_list
     limit, page, start = g.handle_paging(limit, page)
     projects, count = self._find_projects(sort=sort,
                                           limit=limit,
                                           start=start)
     title = self._build_title()
     c.custom_sidebar_menu = self._build_nav()
     return dict(
         projects=projects,
         title=title,
         text=None,
         neighborhood=self.neighborhood,
         sort=sort,
         limit=limit,
         page=page,
         count=count)
示例#21
0
文件: project.py 项目: apache/allura
 def index(self, sort='alpha', limit=25, page=0, **kw):
     """Paged browse listing of this neighborhood's projects."""
     c.project_summary = W.project_summary
     c.page_list = W.page_list
     limit, page, start = g.handle_paging(limit, page)
     projects, count = self._find_projects(
         sort=sort, limit=limit, start=start)
     title = self._build_title()
     c.custom_sidebar_menu = self._build_nav()
     return dict(projects=projects,
                 title=title,
                 text=None,
                 neighborhood=self.neighborhood,
                 sort=sort,
                 limit=limit, page=page, count=count)
示例#22
0
文件: main.py 项目: 99Kies/allura
 def index(self, page=0, limit=None, **kw):
     """Show a post — optionally a specific historical version — along
     with its discussion-thread paging info and subscription state."""
     c.form = W.view_post_form
     c.attachment_list = W.attachment_list
     c.subscribe_form = W.subscribe_form
     c.thread = W.thread
     post_count = self.post.discussion_thread.post_count
     limit, page, _ = g.handle_paging(limit, page)
     # clamp paging values against the real number of posts
     limit, page = h.paging_sanitizer(limit, page, post_count)
     version = kw.pop('version', None)
     # version=None presumably means the current version — confirm in _get_version
     post = self._get_version(version)
     base_post = self.post
     subscribed = M.Mailbox.subscribed(artifact=self.post)
     return dict(post=post, base_post=base_post,
                 page=page, limit=limit, count=post_count,
                 subscribed=subscribed)
示例#23
0
文件: discuss.py 项目: 99Kies/allura
 def index(self, limit=None, page=0, count=0, **kw):
     """Render a discussion thread, bumping its view counter.

     The ``count`` parameter is accepted but recomputed below.
     """
     c.thread = self.W.thread
     c.thread_header = self.W.thread_header
     limit, page, start = g.handle_paging(limit, page)
     self.thread.num_views += 1
     # the update to num_views shouldn't affect it
     # (i.e. keep mod_date / last_updated untouched for this session flush)
     M.session.artifact_orm_session._get().skip_mod_date = True
     M.session.artifact_orm_session._get().skip_last_updated = True
     count = self.thread.query_posts(page=page, limit=int(limit)).count()
     return dict(discussion=self.thread.discussion,
                 thread=self.thread,
                 page=int(page),
                 count=int(count),
                 limit=int(limit),
                 show_moderate=kw.get('show_moderate'))
示例#24
0
文件: main.py 项目: apache/allura
 def index(self, page=0, limit=None, **kw):
     """Show a post — optionally a specific historical version — along
     with its discussion-thread paging info and subscription state."""
     c.form = W.view_post_form
     c.attachment_list = W.attachment_list
     c.subscribe_form = W.subscribe_form
     c.thread = W.thread
     post_count = self.post.discussion_thread.post_count
     limit, page, _ = g.handle_paging(limit, page)
     # clamp paging values against the real number of posts
     limit, page = h.paging_sanitizer(limit, page, post_count)
     version = kw.pop('version', None)
     # version=None presumably means the current version — confirm in _get_version
     post = self._get_version(version)
     base_post = self.post
     subscribed = M.Mailbox.subscribed(artifact=self.post)
     return dict(post=post, base_post=base_post,
                 page=page, limit=limit, count=post_count,
                 subscribed=subscribed)
示例#25
0
文件: discuss.py 项目: apache/allura
 def index(self, limit=None, page=0, count=0, **kw):
     """Render a discussion thread, bumping its view counter.

     The ``count`` parameter is accepted but recomputed below.
     """
     c.thread = self.W.thread
     c.thread_header = self.W.thread_header
     limit, page, start = g.handle_paging(limit, page)
     self.thread.num_views += 1
     # the update to num_views shouldn't affect it
     # (i.e. keep mod_date / last_updated untouched for this session flush)
     M.session.artifact_orm_session._get().skip_mod_date = True
     M.session.artifact_orm_session._get().skip_last_updated = True
     count = self.thread.query_posts(page=page, limit=int(limit)).count()
     return dict(discussion=self.thread.discussion,
                 thread=self.thread,
                 page=int(page),
                 count=int(count),
                 limit=int(limit),
                 show_moderate=kw.get('show_moderate'))
示例#26
0
    def _search(self, model, fields, add_fields, q=None, f=None, page=0, limit=None, **kw):
        """Site-admin search: query solr for *model* artifacts, join the
        hits back to their mongo objects, and normalize field names.

        :param model: artifact class being searched (supplies ``type_s``)
        :param fields: [(solr_field, label), ...] columns to display
        :param add_fields: extra field names to include in the form
        :param q: solr query string; falsy means "no search performed"
        :param f: passed through to solr search (presumably the field to
            search in — confirm against site_admin_search)
        """
        all_fields = fields + [(fld, fld) for fld in add_fields]
        c.search_form = W.admin_search_form(all_fields)
        c.page_list = W.page_list
        c.page_size = W.page_size
        count = 0
        objects = []
        limit, page, start = g.handle_paging(limit, page, default=25)
        if q:
            match = search.site_admin_search(model, q, f, rows=limit, start=start)
            if match:
                count = match.hits
                objects = match.docs

                ids = [obj['id'] for obj in objects]
                mongo_objects = search.mapped_artifacts_from_index_ids(ids, model)
                for obj in objects:
                    # solr ids contain '#'; the part after it keys mongo_objects
                    _id = obj['id'].split('#')[1]
                    obj['object'] = mongo_objects.get(_id)
                # Some objects can be deleted, but still have index in solr, should skip those
                objects = [o for o in objects if o.get('object')]

        def convert_fields(obj):
            # throw the type away (e.g. '_s' from 'url_s')
            result = {}
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # .items() works on both Python 2 and 3
            for k, val in obj.items():
                name = k.rsplit('_', 1)
                if len(name) == 2:
                    name = name[0]
                else:
                    name = k
                result[name] = val
            return result

        return {
            'q': q,
            'f': f,
            # list(...) so 'objects' is a list on Python 3 (map is lazy there)
            'objects': list(map(convert_fields, objects)),
            'count': count,
            'page': page,
            'limit': limit,
            'fields': fields,
            'additional_fields': add_fields,
            'type_s': model.type_s,
        }
示例#27
0
 def index(self, threads=None, limit=None, page=0, count=0, **kw):
     """Forum front page: paged listing of threads that have replies.

     The ``threads`` and ``count`` parameters are accepted but recomputed
     below.
     """
     if self.discussion.deleted:
         redirect(self.discussion.url() + 'deleted')
     limit, page, start = g.handle_paging(limit, page)
     if not c.user.is_anonymous():
         c.subscribed = M.Mailbox.subscribed(artifact=self.discussion)
         c.tool_subscribed = M.Mailbox.subscribed()
     # only threads with at least one reply; flagged threads sort first,
     # then most recently posted-to
     threads = DM.ForumThread.query.find(dict(discussion_id=self.discussion._id, num_replies={'$gt': 0})) \
                                   .sort([('flags', pymongo.DESCENDING), ('last_post_date', pymongo.DESCENDING)])
     c.discussion = self.W.discussion
     c.discussion_header = self.W.discussion_header
     c.whole_forum_subscription_form = self.W.subscribe_form
     return dict(discussion=self.discussion,
                 count=threads.count(),
                 threads=threads.skip(start).limit(int(limit)).all(),
                 limit=limit,
                 page=page)
示例#28
0
文件: forum.py 项目: apache/allura
 def index(self, threads=None, limit=None, page=0, count=0, **kw):
     """Forum front page: paged listing of threads that have replies.

     The ``threads`` and ``count`` parameters are accepted but recomputed
     below.
     """
     if self.discussion.deleted:
         redirect(self.discussion.url() + 'deleted')
     limit, page, start = g.handle_paging(limit, page)
     if not c.user.is_anonymous():
         c.subscribed = M.Mailbox.subscribed(artifact=self.discussion)
         c.tool_subscribed = M.Mailbox.subscribed()
     # only threads with at least one reply; flagged threads sort first,
     # then most recently posted-to
     threads = DM.ForumThread.query.find(dict(discussion_id=self.discussion._id, num_replies={'$gt': 0})) \
                                   .sort([('flags', pymongo.DESCENDING), ('last_post_date', pymongo.DESCENDING)])
     c.discussion = self.W.discussion
     c.discussion_header = self.W.discussion_header
     c.whole_forum_subscription_form = self.W.subscribe_form
     return dict(
         discussion=self.discussion,
         count=threads.count(),
         threads=threads.skip(start).limit(int(limit)).all(),
         limit=limit,
         page=page)
示例#29
0
文件: main.py 项目: xmonader/allura
    def index(self, page=0, limit=None, **kw):
        """Paged listing of this tool's short URLs.

        Private URLs are filtered out unless the user has 'view_private'
        access.
        """
        c.page_list = W.page_list
        c.page_size = W.page_size
        limit, pagenum, start = g.handle_paging(limit, page, default=100)
        criteria = {'app_config_id': c.app.config._id}
        if not has_access(c.app, 'view_private'):
            criteria['private'] = False
        urls_query = ShortUrl.query.find(criteria)
        count = urls_query.count()
        return {
            'short_urls': urls_query.skip(start).limit(limit),
            'limit': limit,
            'pagenum': pagenum,
            'count': count,
            # length of a short url built with an empty short_name
            'url_len': len(ShortUrl.build_short_url(c.app, short_name='')),
        }
示例#30
0
文件: root.py 项目: apache/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON listing of this tool's top-level, non-deleted forums."""
     limit, page, start = g.handle_paging(limit, int(page))
     # parent_id=None restricts to top-level forums
     forums = model.Forum.query.find(dict(
         app_config_id=c.app.config._id,
         parent_id=None, deleted=False)
     ).sort([('shortname', pymongo.ASCENDING)]).skip(start).limit(limit)
     count = forums.count()
     # only forums the current user can read are included
     json = dict(forums=[dict(_id=f._id,
                              name=f.name,
                              shortname=f.shortname,
                              description=f.description,
                              num_topics=f.num_topics,
                              last_post=f.last_post,
                              url=h.absurl('/rest' + f.url()))
                         for f in forums if has_access(f, 'read')])
     json['limit'] = limit
     json['page'] = page
     json['count'] = count
     return json
示例#31
0
文件: main.py 项目: apache/allura
    def index(self, page=0, limit=None, **kw):
        """Paged listing of this tool's short URLs."""
        c.page_list = W.page_list
        c.page_size = W.page_size
        limit, pagenum, start = g.handle_paging(limit, page, default=100)
        p = {'app_config_id': c.app.config._id}
        if not has_access(c.app, 'view_private'):
            # hide private short URLs from unprivileged users
            p['private'] = False
        short_urls = (ShortUrl.query.find(p))
        count = short_urls.count()

        short_urls = short_urls.skip(start).limit(limit)

        return {
            'short_urls': short_urls,
            'limit': limit,
            'pagenum': pagenum,
            'count': count,
            # length of a short url built with an empty short_name
            'url_len': len(ShortUrl.build_short_url(c.app, short_name='')),
        }
示例#32
0
文件: root.py 项目: xmonader/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON listing of this tool's top-level, non-deleted forums."""
     limit, page, start = g.handle_paging(limit, int(page))
     # parent_id=None restricts to top-level forums
     forums = model.Forum.query.find(
         dict(app_config_id=c.app.config._id, parent_id=None,
              deleted=False)).sort([('shortname', pymongo.ASCENDING)
                                    ]).skip(start).limit(limit)
     count = forums.count()
     # only forums the current user can read are included
     json = dict(forums=[
         dict(_id=f._id,
              name=f.name,
              shortname=f.shortname,
              description=f.description,
              num_topics=f.num_topics,
              last_post=f.last_post,
              url=h.absurl('/rest' + f.url())) for f in forums
         if has_access(f, 'read')
     ])
     json['limit'] = limit
     json['page'] = page
     json['count'] = count
     return json
示例#33
0
文件: root.py 项目: apache/allura
 def index(self, limit=None, page=0, **kw):
     """REST JSON view of a forum: its metadata plus a paged topic list."""
     limit, page, start = g.handle_paging(limit, int(page))
     topics = model.Forum.thread_class().query.find(dict(discussion_id=self.forum._id))
     # flagged threads first, then most recent activity
     topics = topics.sort([('flags', pymongo.DESCENDING),
                           ('last_post_date', pymongo.DESCENDING)])
     topics = topics.skip(start).limit(limit)
     # NOTE(review): count() is read off the cursor after skip/limit —
     # presumably it still reports the full match count; verify
     count = topics.count()
     json = {}
     json['forum'] = self.forum.__json__(limit=1)  # small limit since we're going to "del" the threads anyway
     # topics replace threads here
     del json['forum']['threads']
     # only topics with status 'ok' are exposed over REST
     json['forum']['topics'] = [dict(_id=t._id,
                                     subject=t.subject,
                                     num_replies=t.num_replies,
                                     num_views=t.num_views,
                                     url=h.absurl('/rest' + t.url()),
                                     last_post=t.last_post)
                                for t in topics if t.status == 'ok']
     json['count'] = count
     json['page'] = page
     json['limit'] = limit
     return json
Example #34 (score: 0)
    def url_paginated(self):
        '''Return a link to this comment within its thread, carrying a
        #slug fragment plus the limit/page query args needed to land on
        the page the comment is displayed on.
        '''
        if not self.thread:  # pragma no cover
            return None
        # Only the effective page size is needed from the paging helper.
        limit, _page, _start = g.handle_paging(None, 0)
        total = self.query.find(dict(thread_id=self.thread._id)).count()
        if total <= limit:
            # The whole thread fits on one page.
            page = 0
        else:
            threaded = self.thread.create_post_threads(
                self.thread.find_posts())

            def _position(nodes):
                '''Index of this post in depth-first display order.'''
                seen = []

                def walk(children):
                    for node in children:
                        if node['post']._id == self._id:
                            return True  # found
                        seen.append(node)
                        if walk(node['children']):
                            return True
                    return False

                walk(nodes)
                return len(seen)

            page = _position(threaded) // limit

        anchor = h.urlquote(self.slug)
        base = self.main_url()
        if page == 0:
            return '%s?limit=%s#%s' % (base, limit, anchor)
        return '%s?limit=%s&page=%s#%s' % (base, limit, page, anchor)
Example #35 (score: 0)
 def index(self, page=0, limit=DEFAULT_PAGE_LIMIT, **kw):
     """Render one page of this commit's change summary.

     Returns a dict with the commit, its context, a paged, sorted list of
     ('added'|'removed'|'changed'|'copied'|'renamed', entry, 'blob'|'tree',
     is_text) tuples under 'artifacts', plus page/limit/count.
     """
     c.revision_widget = self.revision_widget
     c.page_list = self.page_list
     result = dict(commit=self._commit)
     if self._commit:
         result.update(self._commit.context())
     tree = self._commit.tree
     limit, page, start = g.handle_paging(limit,
                                          page,
                                          default=self.DEFAULT_PAGE_LIMIT)
     diffs = self._commit.paged_diffs(start=start,
                                      end=start + limit,
                                      onlyChangedFiles=True)
     result['artifacts'] = []
     for t in ('added', 'removed', 'changed', 'copied', 'renamed'):
         for f in diffs[t]:
             # copied/renamed entries are dicts; look up by the new path
             if t in ('copied', 'renamed'):
                 filepath = f['new']
             else:
                 filepath = f
             # Hoist the blob lookup: it used to be performed up to three
             # times per file, and the blob/tree check used to pass the
             # raw entry `f` (a dict for copied/renamed) instead of the
             # actual path.
             blob = filepath and tree.get_blob_by_path(filepath)
             is_text = blob and blob.has_html_view
             result['artifacts'].append(
                 (t, f, 'blob' if blob else 'tree', is_text))
     count = diffs['total']
     result.update(dict(page=page, limit=limit, count=count))
     # Sort the result['artifacts'] which is in format as below -
     # [('added', u'aaa.txt', 'blob', True),
     # ('added', u'eee.txt', 'blob', True),
     # ('added', u'ggg.txt', 'blob', True),
     # ('removed', u'bbb.txt', 'tree', None),
     # ('removed', u'ddd.txt', 'tree', None),
     # ('changed', u'ccc.txt', 'blob', True)]
     result['artifacts'].sort(
         key=lambda x: x[1]['old'] if (isinstance(x[1], dict)) else x[1])
     return result
Example #36 (score: 0)
File: discuss.py  Project: apache/allura
    def url_paginated(self):
        '''Return link to the thread with a #target that points to this
        comment.

        Also handle pagination properly: the page number is computed from
        the comment's position in display order.
        '''
        if not self.thread:  # pragma no cover
            return None
        limit, p, s = g.handle_paging(None, 0)  # get paging limit
        if self.query.find(dict(thread_id=self.thread._id)).count() <= limit:
            # all posts in a single page
            page = 0
        else:
            posts = self.thread.find_posts()
            posts = self.thread.create_post_threads(posts)

            def find_i(posts):
                '''Find the index number of this post in the display order'''
                q = []

                def traverse(posts):
                    for p in posts:
                        if p['post']._id == self._id:
                            return True  # found
                        q.append(p)
                        if traverse(p['children']):
                            return True
                traverse(posts)
                return len(q)

            # Floor division: `/` yields a float under Python 3, which
            # would produce URLs like "?page=1.0".
            page = find_i(posts) // limit

        slug = h.urlquote(self.slug)
        url = self.main_url()
        if page == 0:
            return '%s?limit=%s#%s' % (url, limit, slug)
        return '%s?limit=%s&page=%s#%s' % (url, limit, page, slug)
Example #37 (score: 0)
def search_app(q='', fq=None, app=True, **kw):
    """Helper for app/project search.

    Uses dismax query parser. Matches on `title` and `text`. Handles paging, sorting, etc

    :param q: user query string; falsy means "no search performed"
    :param fq: optional list of extra solr filter-query clauses
    :param app: if True, scope the search to the current app (c.app)

    Recognized **kw keys: history, project, search_comments, limit, page,
    default, allowed_types, parser, sort.  Returns a dict of template
    context (results, count, paging, sort urls, any search error).
    """
    from allura.model import ArtifactReference
    from allura.lib.security import has_access

    history = kw.pop('history', None)
    if app and kw.pop('project', False):
        # Used from app's search controller. If `project` is True, redirect to
        # 'entire project search' page
        redirect(c.project.url() + 'search/?' +
                 urlencode(dict(q=q, history=history)))
    search_comments = kw.pop('search_comments', None)
    limit = kw.pop('limit', None)
    page = kw.pop('page', 0)
    default = kw.pop('default', 25)
    allowed_types = kw.pop('allowed_types', [])
    parser = kw.pop('parser', None)
    sort = kw.pop('sort', 'score desc')
    fq = fq if fq else []
    search_error = None
    results = []
    count = 0
    matches = {}
    limit, page, start = g.handle_paging(limit, page, default=default)
    if not q:
        q = ''
    else:
        # Match on both `title` and `text` by default, using 'dismax' parser.
        # Score on `title` matches is boosted, so title match is better than body match.
        # It's 'fuzzier' than standard parser, which matches only on `text`.
        if search_comments:
            allowed_types += ['Post']
        if app:
            # Restrict solr results to this project/app and the allowed
            # artifact types; exclude soft-deleted documents.
            fq = [
                'project_id_s:%s' % c.project._id,
                'mount_point_s:%s' % c.app.config.options.mount_point,
                '-deleted_b:true',
                'type_s:(%s)' %
                ' OR '.join(['"%s"' % t for t in allowed_types])
            ] + fq
        # Highlight markers are placeholders replaced with <strong> tags
        # in add_matches() below.
        search_params = {
            'qt': 'dismax',
            'qf': 'title^2 text',
            'pf': 'title^2 text',
            'fq': fq,
            'hl': 'true',
            'hl.simple.pre': '#ALLURA-HIGHLIGHT-START#',
            'hl.simple.post': '#ALLURA-HIGHLIGHT-END#',
            'sort': sort,
        }
        if not history:
            search_params['fq'].append('is_history_b:False')
        if parser == 'standard':
            # Drop the dismax-specific params to fall back to solr's
            # standard query parser.
            search_params.pop('qt', None)
            search_params.pop('qf', None)
            search_params.pop('pf', None)
        try:
            results = search(q,
                             short_timeout=True,
                             ignore_errors=False,
                             rows=limit,
                             start=start,
                             **search_params)
        except SearchError as e:
            # Surfaced to the template rather than raised to the user.
            search_error = e
        if results:
            count = results.hits
            matches = results.highlighting

            def historize_urls(doc):
                # Snapshot documents link to a specific artifact version.
                if doc.get('type_s', '').endswith(' Snapshot'):
                    if doc.get('url_s'):
                        doc['url_s'] = doc['url_s'] + \
                            '?version=%s' % doc.get('version_i')
                return doc

            def add_matches(doc):
                # Escape the highlighted snippets, then swap the marker
                # placeholders for real <strong> tags.
                m = matches.get(doc['id'], {})
                title = h.get_first(m, 'title')
                text = h.get_first(m, 'text')
                if title:
                    title = (jinja2.escape(title).replace(
                        '#ALLURA-HIGHLIGHT-START#',
                        jinja2.Markup('<strong>')).replace(
                            '#ALLURA-HIGHLIGHT-END#',
                            jinja2.Markup('</strong>')))
                if text:
                    text = (jinja2.escape(text).replace(
                        '#ALLURA-HIGHLIGHT-START#',
                        jinja2.Markup('<strong>')).replace(
                            '#ALLURA-HIGHLIGHT-END#',
                            jinja2.Markup('</strong>')))
                doc['title_match'] = title
                doc['text_match'] = text or h.get_first(doc, 'text')
                return doc

            def paginate_comment_urls(doc):
                # Point comment hits at the right page of their thread.
                if doc.get('type_s', '') == 'Post':
                    artifact = doc['_artifact']
                    if artifact:
                        doc['url_paginated'] = artifact.url_paginated()
                return doc

            def filter_unauthorized(doc):
                aref = ArtifactReference.query.get(_id=doc.get('id'))
                # cache for paginate_comment_urls to re-use
                doc['_artifact'] = aref and aref.artifact
                # .primary() necessary so that a ticket's comment for example is checked with the ticket's perms
                if doc['_artifact'] and not has_access(
                        doc['_artifact'].primary(), 'read', c.user):
                    return None
                else:
                    return doc

            filtered_results = [
                _f for _f in map(filter_unauthorized, results) if _f
            ]
            # Adjust the reported hit count for results hidden by perms.
            count -= len(results) - len(filtered_results)
            results = filtered_results
            # Lazy maps; consumed by list(results) in the return below.
            results = map(historize_urls, results)
            results = map(add_matches, results)
            results = map(paginate_comment_urls, results)

    # Provide sort urls to the view
    score_url = 'score desc'
    date_url = 'mod_date_dt desc'
    try:
        field, order = sort.split(' ')
    except ValueError:
        field, order = 'score', 'desc'
    # The active column's link toggles its sort direction.
    sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
    if field == 'score':
        score_url = sort
    elif field == 'mod_date_dt':
        date_url = sort
    params = request.GET.copy()
    params.update({'sort': score_url})
    score_url = url(request.path, params=params)
    params.update({'sort': date_url})
    date_url = url(request.path, params=params)
    return dict(q=q,
                history=history,
                results=list(results) or [],
                count=count,
                limit=limit,
                page=page,
                search_error=search_error,
                sort_score_url=score_url,
                sort_date_url=date_url,
                sort_field=field)
Example #38 (score: 0)
    def test_with_invalid_limit(self):
        # A junk limit string falls back to the caller-supplied default.
        expected = (30, 0, 0)
        self.assertEqual(g.handle_paging('foo', 0, 30), expected)

        # An unusable stored preference also falls back to the default.
        c.user.set_pref('results_per_page', 'bar')
        self.assertEqual(g.handle_paging(None, 0, 30), expected)
Example #39 (score: 0)
 def test_with_invalid_page(self):
     # A non-numeric page value must be coerced to page 0.
     result = g.handle_paging(10, 'asdf', 30)
     self.assertEqual(result, (10, 0, 0))
Example #40 (score: 0)
    def test_with_invalid_limit(self):
        # An unparseable explicit limit falls back to the default of 30.
        self.assertEqual((30, 0, 0), g.handle_paging('foo', 0, 30))

        # So does an unparseable stored user preference.
        c.user.set_pref('results_per_page', 'bar')
        self.assertEqual((30, 0, 0), g.handle_paging(None, 0, 30))
Example #41 (score: 0)
File: search.py  Project: apache/allura
def search_app(q='', fq=None, app=True, **kw):
    """Helper for app/project search.

    Uses dismax query parser. Matches on `title` and `text`. Handles paging, sorting, etc

    :param q: user query string; falsy means "no search performed"
    :param fq: optional list of extra solr filter-query clauses
    :param app: if True, scope the search to the current app (c.app)

    Recognized **kw keys: history, project, search_comments, limit, page,
    default, allowed_types, parser, sort.  Returns a dict of template
    context (results, count, paging, sort urls, any search error).
    """
    history = kw.pop('history', None)
    if app and kw.pop('project', False):
        # Used from app's search controller. If `project` is True, redirect to
        # 'entire project search' page
        redirect(c.project.url() + 'search/?' +
                 urlencode(dict(q=q, history=history)))
    search_comments = kw.pop('search_comments', None)
    limit = kw.pop('limit', None)
    page = kw.pop('page', 0)
    default = kw.pop('default', 25)
    allowed_types = kw.pop('allowed_types', [])
    parser = kw.pop('parser', None)
    sort = kw.pop('sort', 'score desc')
    fq = fq if fq else []
    search_error = None
    results = []
    count = 0
    matches = {}
    limit, page, start = g.handle_paging(limit, page, default=default)
    if not q:
        q = ''
    else:
        # Match on both `title` and `text` by default, using 'dismax' parser.
        # Score on `title` matches is boosted, so title match is better than body match.
        # It's 'fuzzier' than standard parser, which matches only on `text`.
        if search_comments:
            allowed_types += ['Post']
        if app:
            # Restrict solr results to this project/app and the allowed
            # artifact types; exclude soft-deleted documents.
            fq = [
                'project_id_s:%s' % c.project._id,
                'mount_point_s:%s' % c.app.config.options.mount_point,
                '-deleted_b:true',
                'type_s:(%s)' % ' OR '.join(
                    ['"%s"' % t for t in allowed_types])
            ] + fq
        # Highlight markers are placeholders replaced with <strong> tags
        # in add_matches() below.
        search_params = {
            'qt': 'dismax',
            'qf': 'title^2 text',
            'pf': 'title^2 text',
            'fq': fq,
            'hl': 'true',
            'hl.simple.pre': '#ALLURA-HIGHLIGHT-START#',
            'hl.simple.post': '#ALLURA-HIGHLIGHT-END#',
            'sort': sort,
        }
        if not history:
            search_params['fq'].append('is_history_b:False')
        if parser == 'standard':
            # Drop the dismax-specific params to fall back to solr's
            # standard query parser.
            search_params.pop('qt', None)
            search_params.pop('qf', None)
            search_params.pop('pf', None)
        try:
            results = search(
                q, short_timeout=True, ignore_errors=False,
                rows=limit, start=start, **search_params)
        except SearchError as e:
            # Surfaced to the template rather than raised to the user.
            search_error = e
        if results:
            count = results.hits
            matches = results.highlighting

            def historize_urls(doc):
                # Snapshot documents link to a specific artifact version.
                if doc.get('type_s', '').endswith(' Snapshot'):
                    if doc.get('url_s'):
                        doc['url_s'] = doc['url_s'] + \
                            '?version=%s' % doc.get('version_i')
                return doc

            def add_matches(doc):
                # Escape the highlighted snippets, then swap the marker
                # placeholders for real <strong> tags.
                m = matches.get(doc['id'], {})
                title = h.get_first(m, 'title')
                text = h.get_first(m, 'text')
                if title:
                    title = (jinja2.escape(title)
                                   .replace('#ALLURA-HIGHLIGHT-START#', jinja2.Markup('<strong>'))
                                   .replace('#ALLURA-HIGHLIGHT-END#', jinja2.Markup('</strong>')))
                if text:
                    text = (jinja2.escape(text)
                                  .replace('#ALLURA-HIGHLIGHT-START#', jinja2.Markup('<strong>'))
                                  .replace('#ALLURA-HIGHLIGHT-END#', jinja2.Markup('</strong>')))
                doc['title_match'] = title
                doc['text_match'] = text or h.get_first(doc, 'text')
                return doc

            def paginate_comment_urls(doc):
                from allura.model import ArtifactReference

                # Point comment hits at the right page of their thread.
                if doc.get('type_s', '') == 'Post':
                    aref = ArtifactReference.query.get(_id=doc.get('id'))
                    if aref and aref.artifact:
                        doc['url_paginated'] = aref.artifact.url_paginated()
                return doc
            # Builtin map replaces itertools.imap, which no longer exists
            # in Python 3; py3 map is equally lazy and the chain is
            # consumed by list(results) in the return below.
            results = map(historize_urls, results)
            results = map(add_matches, results)
            results = map(paginate_comment_urls, results)

    # Provide sort urls to the view
    score_url = 'score desc'
    date_url = 'mod_date_dt desc'
    try:
        field, order = sort.split(' ')
    except ValueError:
        field, order = 'score', 'desc'
    # The active column's link toggles its sort direction.
    sort = ' '.join([field, 'asc' if order == 'desc' else 'desc'])
    if field == 'score':
        score_url = sort
    elif field == 'mod_date_dt':
        date_url = sort
    params = request.GET.copy()
    params.update({'sort': score_url})
    score_url = url(request.path, params=params)
    params.update({'sort': date_url})
    date_url = url(request.path, params=params)
    return dict(q=q, history=history, results=list(results) or [],
                count=count, limit=limit, page=page, search_error=search_error,
                sort_score_url=score_url, sort_date_url=date_url,
                sort_field=field)