async def get(self):
    """Render the explore page with a paginated post list.

    Query args:
        page: 1-based page number; falls back to 1 on missing/invalid input.
        number: page size; falls back to 3 on missing/invalid input.
    """
    def _to_int(raw, default):
        # Coerce a query argument to int, tolerating '', None and garbage.
        try:
            return int(raw)
        except (TypeError, ValueError):
            return default

    page = _to_int(self.get_argument('page', '1').strip(), 1)
    number = _to_int(self.get_argument('number', '3').strip(), 3)
    posts = self.db.query(Posts).filter(
        Posts.is_deleted == False).order_by(
            desc(Posts.update_time), desc(Posts.id))
    try:
        pg = paginate(posts, page, number)
    except Exception:
        # Out-of-range page or bad size: fall back to the first default page.
        pg = paginate(posts, 1, 3)
    await self.render('posts/explore.html', posts=pg.items, number=number,
                      pg=pg, page_number=int(page))
def search_movie() -> Response:
    """Search movies by name substring, year, or rating; optionally paginated.

    Query args: filter (name substring), year, top (rating limit),
    size/page (pagination; both required to paginate).
    """
    substring: OPT_STR = request.args.get('filter')
    year: OPT_STR = request.args.get('year')
    top: OPT_STR = request.args.get('top')
    size: OPT_STR = request.args.get('size')
    page: OPT_STR = request.args.get('page')
    with create_session() as session:
        if size and page:
            page_num, page_size = int(page), int(size)
            # Build the base query once, then paginate it.
            if substring:
                query = session.query(Movie).filter(
                    Movie.name.contains(substring))
            elif year:
                query = session.query(Movie).filter(Movie.year == int(year))
            elif top:
                query = (session.query(Movie)
                         .join(MovieRating)
                         .group_by(Movie.id)
                         .order_by(func.avg(MovieRating.rating)))
            else:
                query = session.query(Movie)
            movies = paginate(query, page_num, page_size).items
        else:
            if substring:
                movies = session.query(Movie).filter(
                    Movie.name.contains(substring))
            elif year:
                movies = session.query(Movie).filter(Movie.year == int(year))
            elif top:
                movies = (session.query(Movie)
                          .join(MovieRating)
                          .group_by(Movie.id)
                          .order_by(func.avg(MovieRating.rating))
                          .limit(int(top)))
            else:
                movies = session.query(Movie).all()
        # Serialize while the session is still open (queries are lazy).
        result: dict = {
            'Movies': [
                {'id': movie.id, 'name': movie.name, 'year': movie.year}
                for movie in movies
            ]
        }
    return make_response(jsonify(result), HTTPStatus.OK)
def get_timeline(self, page, page_size=20, watchlist=None, watchwords=None):
    '''
    Get Tweets and Retweets from a user's timeline, one page at a time.
    If the cache is expired, the timeline is re-fetched first.

    :param page: 1-based page number to return.
    :param page_size: entries per page (default 20).
    :param watchlist: optional watchlist; when given, only tweets authored
        or retweeted by users on the list are returned (this user's own
        tweets are excluded from the filter set).
    :param watchwords: optional watchword list used to filter the page's
        items by content.
    :return: model loaded as a TweetPaginatorModel.
    '''
    if self._cache_expired(TimelineSQL):
        self._fetch_timeline()
    with self._session() as session:
        if watchlist:
            watchlist = get_watchlist(watchlist, kind=BaquetConstants.WATCHLIST)
            # When filtering, we are not interested in Tweets authored by the user.
            if self._user_id in watchlist:
                watchlist.remove(self._user_id)
            # Stage the watchlist in a temp join table so the DB can filter
            # by author OR retweeter in a single query.
            join_id = self._add_temp_join(watchlist)
            results = paginate(session.query(TimelineSQL).join(
                UserTempJoinSQL,
                and_(
                    or_(
                        TimelineSQL.retweet_user_id == UserTempJoinSQL.join_on,
                        TimelineSQL.user_id == UserTempJoinSQL.join_on),
                    UserTempJoinSQL.join_id == join_id)).order_by(
                        desc(TimelineSQL.created_at)),
                page=page, page_size=page_size)
        else:
            results = paginate(session.query(TimelineSQL).order_by(
                desc(TimelineSQL.created_at)),
                page=page, page_size=page_size)
        if watchwords:
            watchwords = get_watchlist(watchwords, kind=BaquetConstants.WATCHWORDS)
            # Post-filter the fetched page; item count may drop below page_size.
            results.items = filter_for_watchwords(results.items, watchwords)
        # Serialize while the session is still open (presumably needed for
        # lazy-loaded attributes — see the same note in get_favorites).
        results = serialize_paginated_entities(results)
        if watchlist:
            # Temp join rows are per-call; clean them up.
            self._remove_temp_join(join_id)
        return load_model(results, TweetPaginatorModel)
def get_all_posts(self, page=1):
    """Return one page (14 items) of all user-owned posts, newest id first."""
    # `!= None` is intentional: SQLAlchemy turns it into IS NOT NULL.
    owned = self.session.query(Post).filter(Post.user_id != None)
    return paginate(owned.order_by(-Post.id), page, 14)
def get(self):
    """Return a JSON page of the user's results ordered by match count."""
    query = (ResultsModel.query
             .filter(ResultsModel.user_id == self.args['user_id'])
             .order_by(ResultsModel.matches.desc()))
    total = query.count()
    page = paginate(query, self.args['page'], self.args['per_page'])
    return jsonify(results=results_schema.dump(page.items).data, count=total)
def test_out_of_bounds(self):
    """Pages past the data are empty; page 0 and size 0 raise AttributeError."""
    beyond = paginate(self.query, 2, 25)
    beyond.items.should.have.length_of(0)
    paginate.when.called_with(self.query, 0, 25).should.throw(AttributeError)
    paginate.when.called_with(self.query, 1, 0).should.throw(AttributeError)
def ads_list():
    """Render a paginated, filterable listing of ads."""
    current_page = request.args.get(get_page_parameter(), type=int, default=1)
    # Collected once so the same filters feed the query and the template.
    filters = {
        'oblast_district': request.args.get('oblast_district'),
        'min_price': request.args.get('min_price', type=int),
        'max_price': request.args.get('max_price', type=int),
        'new_building': request.args.get('new_building', type=bool),
    }
    ads_page = paginate(
        query=get_filtered_ads_query(**filters),
        page=current_page,
        page_size=ADS_PER_PAGE,
    )
    pagination = Pagination(
        page=current_page,
        per_page=ADS_PER_PAGE,
        total=ads_page.total,
        bs_version=3,
    )
    return render_template(
        'ads_list.html',
        ads=ads_page.items,
        pagination=pagination,
        **filters,
    )
def persist_destination_data(page, table, page_size=30, source_schema='public',
                             destination_schema='public',
                             models_module='model'):
    """Copy one page of rows for `table` from the source DB to the destination DB.

    Rows are read in primary-key order so pages are deterministic, then
    merged (upserted) one by one into the destination session.

    :param page: 1-based page number to copy.
    :param table: table name used to look up the mapped model class.
    :param page_size: rows per page (default 30).
    :param source_schema: schema of the source database.
    :param destination_schema: schema of the destination database.
    :param models_module: name of the module holding the SQLAlchemy models.
    """
    source_session = get_source_session(source_schema, destination_schema)
    destination_session = get_destination_session()
    models = get_models_module(models_module)
    model = getattr(models, guess_model_name(table))
    # Order by the full primary key so pagination is stable across calls.
    pk = [f"{table}.{col.name}" for col in inspect(model).primary_key]
    items = paginate(source_session.query(model).order_by(text(",".join(pk))),
                     page, page_size).items
    # Deep-copy so merged objects are detached from the source session.
    sources = [deepcopy(row) for row in items]
    pb = ProgressBar(total=page_size + 1,
                     prefix=f'Page {page} Pid: {os.getpid()}')
    for source in sources:
        logger.info('merging data %s', source)  # fixed duplicated word
        destination_session.merge(source)
        pb.next()
    destination_session.flush()
    destination_session.commit()
    destination_session.expunge_all()
    destination_session.close()
    pb.next()
    logger.info('persisted page %d', page)  # fixed 'persited' typo
def get(cls, username):
    """
    Handle GET requests on the
    '/api/users/<username>/comments/?pagenumber=N' endpoint, returning up
    to 500 of the user's comments, 30 per page, newest first.
    """
    try:
        pagenumber = {"pagenumber": request.args.get("pagenumber")}
        incoming_pagination = news_pagination_schema.load(pagenumber)
    except ValidationError as err:
        return err.messages, 400
    if incoming_pagination["pagenumber"] <= 0:
        return make_response(
            jsonify({
                "message": "pagenumber must be greater then 0",
                "code": 400
            }),
            400,
        )
    try:
        username = {"username": username}
        incoming_username = username_schema.load(username)
    except ValidationError as err:
        return err.messages, 400
    db_session = g.flask_backend_session
    # Existence check only: fetch a single row instead of loading every
    # matching comment (the original used .all() here).
    users_comments = db_session.query(BlogNewsStoryComment).filter(
        BlogNewsStoryComment.by == incoming_username['username']).first()
    if not users_comments:
        return make_response(
            jsonify({
                'message': 'comments not found',
                'code': 404
            }), 404)
    # Cap at the 500 most recent comments, then paginate 30 per page.
    page = paginate(
        db_session.query(BlogNewsStoryComment).filter(
            BlogNewsStoryComment.by == incoming_username['username']).order_by(
                desc(BlogNewsStoryComment.time)).limit(500).from_self(),
        incoming_pagination["pagenumber"],
        30,
    )
    result_page = {
        "current_page": incoming_pagination["pagenumber"],
        "has_next": page.has_next,
        "has_previous": page.has_previous,
        # NOTE(review): comments are dumped with a *stories* schema —
        # confirm blognews_stories_schema actually fits comment rows.
        "items": blognews_stories_schema.dump(page.items),
        "next_page": page.next_page,
        "previous_page": page.previous_page,
        "pages": page.pages,
        "total": page.total,
    }
    if incoming_pagination["pagenumber"] > result_page["pages"]:
        return make_response(
            jsonify({
                "message": "Pagination page not found",
                "code": 404
            }),
            404,
        )
    return jsonify(result_page)
async def get_currencies_data(self, paginated: bool = True, page: int = 1,
                              page_size: int = 1) -> dict:
    """Return a page of currencies plus pagination metadata.

    :param paginated: when False, all rows are returned as a single page.
    :param page: 1-based page number (ignored when paginated is False).
    :param page_size: rows per page (ignored when paginated is False).
    :return: dict with serialized 'data' rows and pagination bookkeeping.
    """
    session = self._Session()
    # The original `except Exception as e: raise e` was a no-op re-raise
    # that only mangled the traceback origin; try/finally is sufficient.
    try:
        query = session.query(Currency).order_by(Currency.id)
        if paginated:
            result = paginate(query, page, page_size)
        else:
            items = query.all()
            # Strip the ORDER BY before counting; COUNT doesn't need it.
            total = query.order_by(None).count()
            # Fake a single all-inclusive page (size >= 1 even when empty).
            result = Page(items, 1, total or 1, total)
    finally:
        session.close()
    return {
        'data': [item.asdict() for item in result.items],
        'page': page,
        'page_size': page_size,
        'total_pages': result.pages,
        'total_items': result.total,
        'has_next': result.has_next,
        'has_prev': result.has_previous
    }
def users_page(page=1):
    """Render the user administration list (30 per page); roles >= 3 only."""
    if current_user.role < 3:
        abort(403)
    db = create_session()
    user_page = paginate(db.query(User), page, 30)
    return render_template('users.html', title='Users',
                           current_page=user_page)
def paginations(self, page):
    """
    Paginate posts via the sqlalchemy_pagination library.

    :param page: 1-based page number (may arrive as a string).
    :return: a Page of 4 posts.
    """
    posts_per_page = 4
    # First arg: the post query; second: page number; third: page size.
    return paginate(self.get_post_obj(), int(page), posts_per_page)
def get_work_orders(session, page, size):
    """Return one page of radio work orders, newest first.

    :param session: SQLAlchemy session.
    :param page: 1-based page number (coerced to int).
    :param size: page size (coerced to int).
    :return: a Page of WorkOrderRadio rows.
    """
    # Commented-out experimental queries removed.
    ordered = session.query(WorkOrderRadio).order_by(
        WorkOrderRadio.creationDate.desc())
    return paginate(ordered, int(page), int(size))
def api_list_response(request, model, schema):
    """Build a paginated JSON list response scoped to the caller's company."""
    pager = PageParser(request)
    scoped = model.query.filter(
        model.company_id == g.user.company_id).order_by(
            model.created_at.desc())
    paginator = paginate(query=scoped, page=pager.current_page,
                         page_size=pager.page_size)
    return PaginatedResponse(pager, paginator, schema)()
def get_followers(self, page, page_size=100, watchlist=None):
    '''
    Get this user's followers page by page. If cache is expired, fetch them.
    (NOTE(review): the original docstring said "users followed by this
    user", but the code reads FollowersSQL / _fetch_followers — confirm
    the intended direction.)

    :param page: 1-based page number.
    :param page_size: entries per page (default 100).
    :param watchlist: optional watchlist; when given, only followers on
        the list are returned, hydrated with full user details.
    :return: model loaded as a RelationshipPaginatorModel.
    '''
    if self._cache_expired(FollowersSQL):
        self._fetch_followers()
    if watchlist:
        watchlist = get_watchlist(watchlist, kind=BaquetConstants.WATCHLIST)
        # Stage the watchlist in a temp join table for a single-query filter.
        join_id = self._add_temp_join(watchlist)
        with self._session() as session:
            results = paginate(session.query(FollowersSQL).join(
                UserTempJoinSQL,
                and_(FollowersSQL.user_id == UserTempJoinSQL.join_on,
                     UserTempJoinSQL.join_id == join_id)),
                page=page, page_size=page_size)
            # Temp join rows are per-call; clean them up.
            self._remove_temp_join(join_id)
            if results.items:
                # Look up full user records for everything on this page.
                hydrated_results = hydrate_user_identifiers(
                    user_ids=[result.user_id for result in results.items])
            else:
                hydrated_results = []
            new_items = []
            for item in results.items:
                for result in hydrated_results:
                    if item.user_id == result.user_id:
                        # Attach the hydrated user; only matched rows are kept.
                        setattr(item, "user", result)
                        new_items.append(item)
            results.items = new_items
            return load_model(results, RelationshipPaginatorModel)
    # No watchlist: plain pagination over the whole followers table.
    with self._session() as session:
        results = paginate(session.query(FollowersSQL),
                           page=page, page_size=page_size)
        return load_model(results, RelationshipPaginatorModel)
def get_cron_jobs_schema(session, page, size):
    """Return one serialized page of cron jobs, newest first.

    :param session: SQLAlchemy session.
    :param page: 1-based page number (coerced to int).
    :param size: page size (coerced to int).
    :return: the Page's __dict__ with `items` replaced by dumped dicts.
    """
    schema = CronJobSchema(many=True)
    # Use a distinct name: the original rebound the `page` argument to the
    # Page object, shadowing the incoming page number.
    result_page = paginate(
        session.query(CronJob).order_by(CronJob.creationDate.desc()),
        int(page), int(size))
    for cronjob in result_page.items:
        # Refresh each job's last-run info before serializing.
        cronjob.update_lastrun(session)
    result_page.items = schema.dump(result_page.items)
    return result_page.__dict__
def get_pg(self, page=1, page_size=8, username=None):
    """Return a Page of posts, optionally restricted to one user's posts."""
    if username:
        author = self.get_user(username)
        query = self.db.query(Post).filter_by(user=author)
    else:
        query = self.db.query(Post)
    return paginate(query, page, page_size)
def run(self): xprint('Computing', 'inf', 'running...') # Recover her if already. # ToDo. stop = False while True: # Look for requests session = self.storing.hook("Computing") current_node = session.query(Instance).first() if current_node.status == "stopped": stop = True self.storing.release("Computing") if stop: xprint('Computing', 'inf', 'stopping...') break session = self.storing.hook("Computing") coms = session.query(Communication).filter(Communication.receiver == "computing").all() for com in coms: if com: request = {'type':'received', 'comm':com.dict()} self.storing.release("Computing") self.process(request) else: self.storing.release("Computing") # Look for responses session = self.storing.hook("Computing") coms = session.query(Communication).filter(Communication.sender == "computing").all() for com in coms: if com: request = {'type':'sent', 'comm': com.dict()} self.storing.release("Computing") self.process(request) else: self.storing.release("Computing") # Look for queu and new contracts to add # next request contains a check for the running containers. # and the addition in the queu of new contracts. session = self.storing.hook("Computing") jobs = session.query(Queu).all() ## Check if the containers are done. request = {'type':'jobs', 'jobs':[j.dict() for j in jobs]} self.storing.release("Computing") ## Look for the next 10 contracts session = self.storing.hook("Computing") pagination = paginate(session.query(Contract).filter(Contract.status == "added").order_by(Contract.reward), 1, 10) cntrs = pagination.items # xprint("Computing", "deb", "Paginate: {0}".format(len(cntrs))) # break ## Check if the containers are done. request['contracts'] = [c.dict() for c in cntrs] self.storing.release("Computing") self.process(request) time.sleep(10) xprint('Computing', 'inf', 'stopped.')
def get_favorites(self, page, page_size=20, watchlist=None, watchwords=None):
    '''
    Get the posts a user has liked, one page at a time. If cache is
    expired, fetch them.

    :param page: 1-based page number.
    :param page_size: entries per page (default 20).
    :param watchlist: optional watchlist; when given, only favorites
        authored by users on the list are returned.
    :param watchwords: optional watchword list used to filter the page's
        items by content.
    :return: model loaded as a TweetPaginatorModel.
    '''
    if self._cache_expired(FavoritesSQL):
        self._fetch_favorites()
    with self._session() as session:
        if watchlist:
            watchlist = get_watchlist(watchlist, kind=BaquetConstants.WATCHLIST)
            # Stage the watchlist in a temp join table for a single-query filter.
            join_id = self._add_temp_join(watchlist)
            results = paginate(session.query(FavoritesSQL).join(
                UserTempJoinSQL,
                and_(FavoritesSQL.user_id == UserTempJoinSQL.join_on,
                     UserTempJoinSQL.join_id == join_id)).order_by(
                         desc(FavoritesSQL.created_at)),
                page=page, page_size=page_size)
        else:
            results = paginate(session.query(FavoritesSQL).order_by(
                desc(FavoritesSQL.created_at)),
                page=page, page_size=page_size)
        if watchwords:
            watchwords = get_watchlist(watchwords, kind=BaquetConstants.WATCHWORDS)
            # Post-filter the fetched page; item count may drop below page_size.
            results.items = filter_for_watchwords(results.items, watchwords)
        # This maneuver seems to be required for sqlalchemy...
        results = serialize_paginated_entities(results)
        if watchlist:
            # Temp join rows are per-call; clean them up.
            self._remove_temp_join(join_id)
        return load_model(results, TweetPaginatorModel)
def get_object_list(cls, db_session, page_num=1):
    """
    Fetch one page of the tracking-code listing.

    :param db_session: SQLAlchemy session.
    :param page_num: 1-based page number.
    :return: Page whose rows include campaign_name, code, clicks, created_time.
    """
    listing = db_session.query(cls.model)
    return paginate(listing, page_num, Config.NUM_PER_PAGE)
def get(cls):
    """
    Handle GET requests on the
    '/api/hackernews/newstories/?pagenumber=N' endpoint, returning a page
    with 30 hacker_news new_stories from the database (500 newest max).
    """
    try:
        pagenumber = {"pagenumber": request.args.get("pagenumber")}
        incoming_pagination = page_number_schema.load(pagenumber)
    except ValidationError as err:
        return err.messages, 400
    if incoming_pagination["pagenumber"] <= 0:
        return make_response(
            jsonify({
                "message": "pagenumber must be greater then 0",
                "code": 400
            }),
            400,
        )
    db_session = g.hacker_news_session
    # Existence check only: one row is enough, don't load the whole table
    # (the original used .all() here).
    stories = db_session.query(HackerNewsNewStory).first()
    if not stories:
        return make_response(
            jsonify({
                "message": "No hackernews newstories found",
                "code": 404
            }),
            404,
        )
    # Cap at the 500 most recently parsed stories, then paginate 30 per page.
    page = paginate(
        db_session.query(HackerNewsNewStory).order_by(
            desc(HackerNewsNewStory.parsed_time)).limit(500).from_self(),
        incoming_pagination["pagenumber"],
        30,
    )
    result_page = {
        "current_page": incoming_pagination["pagenumber"],
        "has_next": page.has_next,
        "has_previous": page.has_previous,
        "items": stories_schema.dump(page.items),
        "next_page": page.next_page,
        "previous_page": page.previous_page,
        "pages": page.pages,
        "total": page.total,
    }
    if incoming_pagination["pagenumber"] > result_page["pages"]:
        return make_response(
            jsonify({
                "message": "Pagination page not found",
                "code": 404
            }),
            404,
        )
    return jsonify(result_page)
def get_directory(self, page, page_size=20):
    '''Get one page of directory users, ordered by screen name.'''
    with self._session() as session:
        ordered = session.query(DirectorySQL).order_by(
            DirectorySQL.screen_name)
        raw_page = paginate(ordered, page=page, page_size=page_size)
        results = serialize_paginated_entities(raw_page)
        return load_model(results, UserPaginatorModel)
def get_notes_user(self, page, page_size=20):
    '''Get one page of user notes, newest first.'''
    with self._session() as session:
        notes_query = session.query(UserNotesSQL).order_by(
            desc(UserNotesSQL.created_at))
        results = paginate(notes_query, page=page, page_size=page_size)
        return load_model(results, NotePaginatorModel)
def index():
    """Render the index page with messages paginated five per page.

    A missing or non-numeric ?page value falls back to page 1 instead of
    raising an unhandled ValueError (HTTP 500).
    """
    raw_page = request.args.get("page")
    try:
        page = int(raw_page) if raw_page else 1
    except ValueError:
        page = 1
    messages_query = db.query(Message)
    messages = paginate(query=messages_query, page=page, page_size=5)
    return render_template("index.html", messages=messages)
def get(self):
    """Return the distinct symptoms linked to a subpart's conditions, paginated."""
    subpart = SubpartsModel.query.filter(
        SubpartsModel.id == self.args['subpart_id']).first()
    symptom_ids = [
        symptom.id
        for condition in subpart.conditions
        for symptom in condition.symptoms
    ]
    # De-duplicate while preserving first-seen order.
    unique_ids = list(dict.fromkeys(symptom_ids))
    matches = SymptomsModel.query.filter(SymptomsModel.id.in_(unique_ids))
    page = paginate(matches, self.args['page'], self.args['per_page'])
    return jsonify(symptoms=symptoms_schema.dump(page.items).data)
def fetch_paginated(self, session, page_number):
    """Return (items, pages_info) for the requested page of this model,
    or ([], None) when the page number exceeds the page count."""
    requested = int(page_number)
    paginator = paginate(session.query(self.__class__), requested, 25)
    if requested > paginator.pages:
        return [], None
    pages_info = {
        'total_pages': paginator.pages,
        'has_next': paginator.has_next,
        'has_previous': paginator.has_previous,
        'next_page': paginator.next_page,
        # Kept as passed in (possibly a string), matching prior behavior.
        'current_page': page_number,
    }
    return paginator.items, pages_info
def get_watchlist_users(self, page, page_size=20):
    '''Get one page of the watchlist as fully detailed users.'''
    with self._session() as session:
        raw_page = paginate(
            session.query(WatchlistSQL),
            page=page,
            page_size=page_size,
        )
        serialized = serialize_paginated_entities(raw_page)
        return load_model(serialized, UserPaginatorModel)
def contracts(self, page):
    """Pretty-print (via xprint) one page of 10 contracts with totals.

    Out-of-range pages are clamped: beyond the end yields the last page,
    below 1 yields the first page.
    """
    xprint('Computing', 'deb',
           'Fetching all contracts list on page [{0}].'.format(page))
    session = self.storing.hook("Computing")
    ## Access in between
    cntrs = {'list':[], 'size':0, 'pages': 0}
    # The original loaded the entire table via .all() into a variable that
    # was either overwritten or (when there are no pages) provably empty.
    cnts = []
    pagination = paginate(session.query(Contract), 1, 10)
    pages = pagination.pages
    if pages > 0:
        if page > pages:
            cnts = paginate(session.query(Contract), pages, 10).items
        elif page < 1:
            cnts = pagination.items
        else:
            cnts = paginate(session.query(Contract), page, 10).items
    for cntr in cnts:
        cntrs['list'].append(cntr.dict())
    cntrs['size'] = pagination.total
    cntrs['pages'] = pages
    self.storing.release("Computing")
    xprint('Computing', 'inf',
           '\n{0}.'.format(json.dumps(cntrs, sort_keys=True, indent=4)))
def get_object_list(cls, db_session, page_num=1):
    """Fetch one page of the email-login listing.

    :param db_session: SQLAlchemy session.
    :param page_num: 1-based page number.
    :return: Page whose rows include email, order_count, created_time.
    """
    # Correlated count of orders per email row.
    order_count = (db_session.query(func.count(OrderModel.id))
                   .join(cls.model)
                   .filter(OrderModel.email_id == cls.model.id)
                   .label('order_count'))
    listing = db_session.query(cls.model.email, order_count,
                               cls.model.created_time)
    return paginate(listing, page_num, Config.NUM_PER_PAGE)
def on_get(self, req, resp, args):
    """Respond with one JSON page of customers plus pagination metadata."""
    customer_query = req.context.db_session.query(Customer)
    page = paginate(customer_query, args['page_num'], args['limit'])
    resp.status = falcon.HTTP_200
    resp.media = {
        'customers': [customer.as_dict for customer in page.items],
        'total': page.total,
        'pages': page.pages,
        'next_page': page.next_page,
        'previous_page': page.previous_page,
    }
def test_out_of_bounds(self):
    """A page beyond the data is empty; invalid page/size arguments raise."""
    past_end = paginate(self.query, 2, 25)
    past_end.items.should.have.length_of(0)
    paginate.when.called_with(self.query, 0, 25).should.throw(AttributeError)
    paginate.when.called_with(self.query, 1, 0).should.throw(AttributeError)
def test_items(self):
    """Full pages hold page_size items; the last partial page holds the rest."""
    full_page = paginate(self.query, 1, 5)
    full_page.items.should.have.length_of(5)
    last_page = paginate(self.query, 9, 3)
    last_page.items.should.have.length_of(1)
def test_pages(self):
    """The page count reflects the total row count at the given page size."""
    first_page = paginate(self.query, 1, 3)
    first_page.pages.should.equals(9)
def test_next_page(self):
    """From page 1 there is a next page, namely page 2."""
    first_page = paginate(self.query, 1, 5)
    first_page.next_page.should.equals(2)
    first_page.has_next.should.be(True)
def test_previous_page(self):
    """From page 2 there is a previous page, namely page 1."""
    second_page = paginate(self.query, 2, 5)
    second_page.previous_page.should.equals(1)
    second_page.has_previous.should.be(True)
def test_paginate_has_next(self):
    """On the final page there is no next page."""
    last_page = paginate(self.query, 5, 5)
    last_page.has_next.should.be(False)
    last_page.next_page.should.be(None)
def test_paginate_has_previous(self):
    """On the first page there is no previous page."""
    first_page = paginate(self.query, 1, 5)
    first_page.has_previous.should.be(False)
    first_page.previous_page.should.be(None)
def test_paginate_total(self):
    """The reported total equals the fixture's full user count."""
    any_page = paginate(self.query, 1, 5)
    any_page.total.should.equals(self.total_users)