def paginate_link_tag(item):
    """Create an A-HREF tag that points to another page usable in paginate.

    The current page gets highlighted via a CSS class on its <li> wrapper.
    """
    link = Page.default_link_tag(item)
    # Only the current page carries the highlight classes.
    attrs = {'class': 'blue white-text'} if item['type'] == 'current_page' else {}
    return make_html_tag('li', link, **attrs)
def photos(self):
    """Return one page (3 items) of approved photos for the template."""
    approved = self.photo_service.approved_photos()
    page_number = int(self.request.matchdict["page"])
    return {
        "photos": Page(
            approved,
            page=page_number,
            items_per_page=3,
            item_count=len(approved),
        )
    }
def photos_tobe_approved(self):
    """Return paginated unapproved photos"""
    pending = self.photo_service.unapproved_photos()
    page_number = int(self.request.matchdict["page"])
    return {
        "photos": Page(
            pending,
            page=page_number,
            items_per_page=3,
            item_count=len(pending),
        )
    }
def __init__(self, request):
    """Paginate the module-level ARTICLES list for the requested page."""
    from paginate import Page

    # Page number arrives as a query-string value; default to page 1.
    self.page = request.GET.get('page', '1')
    self.paged_articles = Page(
        ARTICLES,
        page=self.page,
        items_per_page=8,
    )
def admin_docker_list_view(context, request):
    """Show list of docker images."""
    def make_url(page_number):
        # Pager links point back at the current path with a ?page= query.
        return request.path_url + "?page=%s" % page_number

    paginator = Page(
        context.all,
        url_maker=make_url,
        page=int(request.params.get('page', 1)),
        items_per_page=6,
    )
    return {'paginator': paginator}
def list_converted_xml_view(request):
    """List converted and validated XML files, 20 per page."""
    # Merge files from both the conversion and the valid-XML directories.
    xml_paths = files.xml_files_list(config.get("CONVERSION_PATH"))
    xml_paths.extend(files.xml_files_list(config.get("VALID_XML_PATH")))
    paginated = Page(
        xml_paths,
        page=int(request.params.get("page", 1)),
        items_per_page=20,
        item_count=len(xml_paths),
    )
    return {"xmls": paginated, "page_title": "Lista de XMLS Convertidos"}
def __init__(self, request):
    """Load all articles from the database and paginate them.

    :param request: the incoming request; ``?page=`` selects the page.
    """
    self.page = request.GET.get('page', '1')
    from paginate import Page
    dbsession = Session()
    try:
        articles = dbsession.query(Articles).all()
    finally:
        # Close the session even when the query raises, so the DB
        # connection is returned to the pool instead of leaking.
        dbsession.close()
    self.paged_articles = Page(
        articles,
        page=self.page,
        items_per_page=8,
    )
def _paginate_zones(self, zones, page, per_page, url, url_rel, pattern, index):
    """Paginate *zones* and render the pager markup for the zones listing."""
    paged = Page(zones, page=page, items_per_page=per_page,
                 item_count=zones.count())
    # Position just past the leading '/' of the "/zones" path segment.
    start = url.find("/zones") + 1
    if '/' in url[start:]:
        # Already inside a sub-path: links stay relative as given.
        link_add_delete = ""
    else:
        # Top-level listing: prefix links with the 'zones/' segment.
        url_rel = 'zones/' + url_rel
        link_add_delete = 'zones/'
    pagination = paged.pager(pattern, url=url_rel, dotdot_attr={'x': 5},
                             link_attr={'y': 6}, curpage_attr={'z': 77})
    return {
        "zones": paged,
        "pagination": pagination,
        "index": index,
        "link_add_delete": link_add_delete,
    }
def search():
    """Solr-backed search controller (web2py); exposes its locals to the view.

    NOTE: ``return locals()`` at the end hands every local name to the
    template, so variables that look unused below are intentional.
    """
    q = '{}'.format(request.vars.q) if request.vars.q else '*'
    # Fixed: original had a duplicated assignment `form = form = ...`.
    form = SQLFORM.factory(
        Field("title"),
        Field("press_id"),
        formstyle='divs',
        submit_button="Search",
    )
    if form.process().accepted:
        title = form.vars.title
    sort = ['title_de', 'title_en']
    start = 0
    rows = 10
    fq = {'title_en': '*', 'locale': 'de'}
    exc = {'submission_id': '42'}
    fl = ['title_de', 'submission_id', 'press_id', 'title_en']
    if myconf.take("plugins.solr") == str(1):
        solr = OMPSOLR(db, myconf)
        # r = solr.si.query(solr.si.Q(title_en=title) | solr.si.Q(title_de=title))
        # r = solr.si.query(solr.si.Q(title_de='*Leben*'))
        # NOTE(review): q.decode('utf-8') implies this runs on Python 2
        # (str has no .decode on Python 3) — confirm before porting.
        r = solr.si.query(
            solr.si.Q(q.decode('utf-8'))
            & solr.si.Q(press_id=myconf.take('omp.press_id')))
        # for s in sort:
        #     r = r.sort_by(s)
        # r = r.filter(**fq)
        # r = r.exclude(**exc)
        # r = r.field_limit(fl)
        # r = r.highlight(q.keys())
        r = r.paginate(start=start, rows=rows)
        results = r.execute()
        hl = results.highlighting
        from paginate import Page, make_html_tag

        def paginate_link_tag(item):
            """ Create an A-HREF tag that points to another page usable in paginate. """
            a_tag = Page.default_link_tag(item)
            if item['type'] == 'current_page':
                return make_html_tag('li', a_tag, **{'class': 'active'})
            return make_html_tag('li', a_tag)

        p = Page(['test', 'test2'], page=15, items_per_page=15, item_count=10)
    return locals()
def get_wrapper(obj, *args, **kwargs):
    """Wrap *obj* in the pagination class appropriate for its type.

    Lists/tuples get a plain Page, SQLAlchemy queries a SqlalchemyOrmPage,
    and any sequence-like object (iterable, sized, indexable) is returned
    as-is; anything else is rejected.
    """
    if isinstance(obj, (list, tuple)):
        return Page(obj, *args, **kwargs)
    if isinstance(obj, sqlalchemy.orm.query.Query):
        return SqlalchemyOrmPage(obj, *args, **kwargs)
    if all(hasattr(obj, name) for name in ("__iter__", "__len__", "__getitem__")):
        return obj
    raise TypeError(INCOMPATIBLE_COLLECTION_TYPE)
def __iter__(self):
    """WSGI body generator: render the paginated article index (Python 2)."""
    self.start('200 OK', [('Content-Type', 'text/html')])
    from paginate import Page
    from urlparse import parse_qs

    query = parse_qs(self.environ['QUERY_STRING'])
    # Take the last ?page= value if several were supplied; default to '1'.
    page_number = query.get('page', ['1'])[-1]
    paged_articles = Page(
        ARTICLES,
        page=page_number,
        items_per_page=8,
    )
    html = env.get_template('index.html').render(articles=paged_articles)
    yield html.encode('utf-8')
def __iter__(self):
    """WSGI body generator: render the paginated article index (Python 3)."""
    self.start('200 OK', [('Content-Type', 'text/html')])
    from urllib.parse import parse_qs
    from paginate import Page

    params = parse_qs(self.environ['QUERY_STRING'])
    # Take the last ?page= value if several were supplied; default to '1'.
    page_number = params.get('page', ['1'])[-1]
    paged_articles = Page(
        ARTICLES,
        page=page_number,
        items_per_page=8,
    )
    rendered = env.get_template('index.html').render(articles=paged_articles)
    yield rendered.encode()
class Pager(Component):
    """A pager component based on paginate.Page.

    >>> pager = Pager(None, range(100), page=4, url_maker=lambda p: 'page %s' % p)
    >>> assert pager.render()
    """

    def __init__(self, req, collection, **kw):
        self.req = req
        self.page = Page(collection, **kw)

    def render(self):
        """Render the pager HTML, rewriting spans into Bootstrap-style anchors."""
        markup = self.page.pager(
            format='<div class="pagination"><ul><li>~3~</li></ul></div>',
            separator='</li><li>',
            curpage_attr={'class': 'active'},
            dotdot_attr={'class': 'disabled'})
        # paginate emits <span> for the current/dotdot items; turn them
        # into anchors and move the state classes onto the <li> wrappers.
        rewrites = (
            ('<span ', '<a '),
            ('</span>', '</a>'),
            ('<li><a class="active"', '<li class="active"><a'),
            ('<li><a class="disabled"', '<li class="disabled"><a'),
        )
        for old, new in rewrites:
            markup = markup.replace(old, new)
        return markup
def search():
    """Search job listings for a query/location, newest first, paginated."""
    query = request.args.get('q')
    location = request.args.get('l')
    page = request.args.get('page', 1)
    errors = []

    jobSearchQuery = JobSearchQuery(query, location)
    cachedGetAllJobListings = redisMemoize(getAllJobListings)
    jobListings = cachedGetAllJobListings(jobSearchQuery)

    # Sort job listings by post date, newest first.
    jobListings.sort(key=lambda listing: listing.postDate, reverse=True)

    jobPaginator = Page(jobListings, page=page)
    # '$page' is a placeholder substituted by the pagination template.
    pageURL = url_for('search', q=query, l=location, _external=True) + '&page=$page'
    return render_template('search_results.html',
                           jobSearchQuery=jobSearchQuery,
                           jobListings=jobPaginator,
                           pageURL=pageURL,
                           errors=errors)
def ElasticsearchPage(*args, **kwargs):  # noqa
    """Factory for a Page whose results are wrapped for Elasticsearch."""
    if "wrapper_class" not in kwargs:
        kwargs["wrapper_class"] = _ElasticsearchWrapper
    return Page(*args, **kwargs)
def paginator(cls, request, page=1):
    """Build a 10-per-page Page over all entries with request-aware URLs."""
    return Page(
        Entry.all(),
        page,
        url_maker=UrlMaker(request),
        items_per_page=10,
    )
def __init__(self, req, collection, **kw):
    """Keep a reference to *req* and wrap *collection* in a paginate.Page.

    :param req: the current request object (stored as-is).
    :param collection: items to paginate.
    :param kw: passed through to ``Page`` (e.g. page, items_per_page).
    """
    self.req = req
    self.page = Page(collection, **kw)
def inner_func():
    """Run the database query (using closed-over args) and paginate the rows."""
    rows = query_database_inner(rules=rules, limit=limit)
    return Page(rows, page=page, items_per_page=items_per_page)