def get_words(self, limit, page_number, callback=None):
    """Fetch one page of Word rows and deliver them through *callback*.

    :param limit: page size (coerced with ``int()``).
    :param page_number: 1-based page to fetch (coerced with ``int()``).
    :param callback: called as ``callback(result, meta_obj)``; skipped when
        ``None`` (the original crashed with TypeError in that case).
    """
    session = self.db_session
    result = []
    meta_obj = {}
    try:
        query = session.query(Word)
        paginator = Paginator(query, int(limit))
        page = paginator.page(int(page_number))
        result = self.serialize_alchemy(page.object_list)
        meta_obj = {
            "page": page_number,
            "limit": limit,
            # conditional expression instead of the fragile `and/or` idiom
            # (which breaks if a page number were falsy)
            "next_page": page.next_page_number if page.has_next() else None,
            "previous_page": (page.previous_page_number
                              if page.has_previous() else None),
            "page_count": paginator.total_pages,
            "total_count": paginator.count,
            # Word.__name__ directly; no need to instantiate a throwaway Word()
            "Links": generate_meta(Word.__name__.lower(), limit, page_number,
                                   paginator.total_pages),
        }
    except Exception:
        # best-effort contract preserved: deliver an empty result on failure
        result = []
    if callback is not None:
        callback(result, meta_obj)
def test_pagination(self):
    """Verify Paginator page maths for page 2 of 1000 rows, 10 per page."""
    query = self.session.query(self.Person)
    paginator = Paginator(query, 10)
    page = paginator.page(2)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual(paginator.total_pages, 100,
                     msg="Total page= %s, expected= %s" % (paginator.total_pages, 100))
    self.assertEqual(paginator.count, 1000,
                     msg="Total records= %s, expected= %s" % (paginator.count, 1000))
    self.assertEqual(len(page.object_list), 10,
                     msg="Total records in page= %s, expected= %s" % (len(page.object_list), 10))
    self.assertEqual(page.previous_page_number, 1,
                     msg="Previous page number= %s, expected= %s" % (page.previous_page_number, 1))
    self.assertEqual(page.next_page_number, 3,
                     msg="Next page number= %s, expected= %s" % (page.next_page_number, 3))
    self.assertEqual(page.start_index, 11,
                     msg="Start index of page= %s, expected= %s" % (page.start_index, 11))
    self.assertEqual(page.end_index, 20,
                     msg="End index of page= %s, expected= %s" % (page.end_index, 20))
def home():
    """Render the paginated home feed of posts, newest first."""
    current = request.args.get('page', 1, type=int)
    ordered = session.query(Post).order_by(Post.date_posted.desc())
    paginator = Paginator(ordered, 5)
    # an out-of-range page renders the 404 template instead of raising
    if current > paginator.pages_range[-1]:
        return render_template("errors/404.html")
    return render_template(
        'home.html',
        posts_page=paginator.page(current),
        posts=paginator,
        show_pages=pagination(current, paginator.pages_range),
    )
def get(self):
    '''returns all the bucketlists of the user

    Query parameters:
        limit -- page size (default 20), must be an integer -> 400 otherwise
        page  -- page number (default 1), must be an integer -> 400 otherwise
        q     -- optional substring to match against bucketlist names

    Returns a dict of pagination metadata plus the serialized bucketlists,
    with HTTP status 200.
    '''
    usr_id = g.user.id

    def _int_arg(name, default):
        # parse an optional integer query parameter; 400 on non-digit input
        if name not in request.args:
            return default
        if not request.args[name].isdigit():
            abort(400, message='%s parameter should be an integer' % name)
        return int(request.args[name])

    limit = _int_arg('limit', 20)
    page = _int_arg('page', 1)

    # base query for this user; narrow it by the search term when given
    query = db_session.query(BucketList).filter_by(created_by=usr_id)
    if 'q' in request.args:
        query = query.filter(
            BucketList.name.like('%' + request.args['q'] + '%'))

    current_page = Paginator(query, limit).page(page)
    bucketlists = current_page.object_list

    if bucketlists is None:
        rv = {'bucketlists': 'none'}
    else:
        ls = []
        schema = BucketListSchema()
        for bucketlist in bucketlists:
            ls.append(schema.dump(bucketlist).data)
        rv = {
            'total_objects': current_page.paginator.count,
            'total_pages': current_page.paginator.total_pages,
            'current_page': current_page.number,
            'has_next_page': current_page.has_next(),
            'has_previous_page': current_page.has_previous(),
            'bucketlists': ls
        }
    return rv, 200
def get(self, page=1):
    """Retrieve all bucketlists belonging to the logged in user.

    limit specifies the maximum number of results per page specified
    with default set to 20 (capped at 100). q specifies the term to
    search by through the bucketlists.
    """
    try:
        limit = int(request.args['limit'])
    except BadRequestKeyError:
        # no limit supplied — fall back to the default page size
        limit = 20
    if limit > 100:
        limit = 100
    q = request.args.get('q', type=str)
    created_by = current_user.user_id
    if created_by:
        if q:
            bucketlistget = session.query(BucketList).filter_by(
                creator=created_by).filter(
                BucketList.list_name.contains(q))
        else:
            bucketlistget = session.query(BucketList).filter_by(
                creator=created_by)
        paginate = Paginator(bucketlistget, limit)
        # BUG FIX: original passed the undefined name `bucketlists`
        # (NameError at runtime); the query object is `bucketlistget`
        page_responses = paging(bucketlistget, paginate, page)
        return page_responses
    return {'message': 'Please login to view your bucketlists'}, 401
def user_posts(username):
    """Render the paginated posts authored by *username*, newest first."""
    current = request.args.get('page', 1, type=int)
    author = session.query(User).filter_by(username=username).first()
    # unknown author -> 404 template
    if not author:
        return render_template("errors/404.html")
    query = session.query(Post).filter_by(author=author).order_by(
        Post.date_posted.desc())
    paginator = Paginator(query, 5)
    # out-of-range page -> 404 template as well
    if current > paginator.pages_range[-1]:
        return render_template("errors/404.html")
    return render_template(
        'user_posts.html',
        query=query,
        posts_page=paginator.page(current),
        posts=paginator,
        show_pages=pagination(current, paginator.pages_range),
        user=author,
    )
def paginate(
    cls,
    db,
    per_page_limit,
    optional_count_query_set=None,
    allow_empty_first_page=True,
):
    """Build a Paginator over every row of this model.

    :param db: session-like object exposing ``query(cls)``.
    :param per_page_limit: number of objects per page.
    :param optional_count_query_set: forwarded to Paginator as-is.
    :param allow_empty_first_page: forwarded to Paginator as-is.
    """
    all_rows = db.query(cls)
    return Paginator(
        all_rows,
        per_page_limit,
        optional_count_query_set,
        allow_empty_first_page,
    )
def check_balance(expected_balance):
    """Assert the CURRENCY balance summed over all accounts equals
    *expected_balance* (paged through accounts 20 at a time)."""
    total = 0
    for page in Paginator(Account.query, 20):
        for acct in page.object_list:
            total += account_service.get_account_balance(
                account=acct,
                up_to_version=0,
            ).total.get(CURRENCY)
    assert total == expected_balance
def remove_redundant(processed_transactions):
    """Delete EXTERNAL transactions whose blockchain version is absent from
    *processed_transactions* (i.e. not seen during synchronization)."""
    for page in Paginator(Transaction.query, PAGE_SIZE):
        for tx in page.object_list:
            if tx.type != TransactionType.EXTERNAL:
                continue
            if tx.blockchain_version in processed_transactions:
                continue
            logger.info(
                f"transaction with blockchain version {tx.blockchain_version} was not found in "
                f"blockchain while synchronization and therefore is been deleted "
            )
            delete_transaction_by_id(tx.id)
def calculate_lrw_balance(up_to_version):
    """Sum every account's CURRENCY balance up to *up_to_version* and
    return the total, logging each account's balance along the way."""
    total = 0
    for page in Paginator(Account.query, PAGE_SIZE):
        for account in page.object_list:
            balance = account_service.get_account_balance_by_id(
                account_id=account.id,
                up_to_version=up_to_version,
            ).total.get(CURRENCY)
            logger.info(f"account name {account.name} balance {balance}")
            total += balance
    return total
def get_reports_download_detail(self, start_date: datetime, end_date: datetime,
                                acquirer: str, filters: list, page: int,
                                pageSize: int):
    """Return one page of queue-detail rows joined with their acquirer.

    :param start_date: lower bound (inclusive) on ``QueueDetail.createdAt``.
    :param end_date: upper bound (inclusive) on ``QueueDetail.createdAt``.
    :param acquirer: acquirer code to filter by, or 'ALL' for no filter.
    :param filters: extra search filters forwarded to ``make_search_query``.
    :param page: 1-based page number to fetch.
    :param pageSize: rows per page.
    :returns: 3-tuple ``(records, keys, meta)`` — list of row dicts, column
        names, and pagination metadata. Always a 3-tuple (the original
        returned bare ``None`` for an empty page, breaking unpacking callers).
    """
    records = []
    keys = []
    meta = {
        "total": 0,
        "current": page,
        "pageSize": pageSize,
    }
    try:
        queue_files = self.session.query(
            QueueDetail, QueueProcesses.acquirer.label("acquirer"))\
            .select_from(QueueDetail)\
            .join(QueueProcesses,
                  QueueDetail.processId == QueueProcesses.process_id)\
            .filter(between(QueueDetail.createdAt, start_date, end_date))
        if acquirer != 'ALL':
            queue_files = queue_files.filter(
                QueueProcesses.acquirer == acquirer)
        if filters:
            queue_files = self.make_search_query(filters, queue_files)

        paginator = Paginator(queue_files, meta["pageSize"])
        # named result_page: the original rebound the `page` parameter here
        result_page = paginator.page(meta["current"])
        meta["total"] = result_page.paginator.count
        meta["countPages"] = result_page.paginator.total_pages
        meta["previous_page_number"] = result_page.previous_page_number
        meta["next_page_number"] = result_page.next_page_number

        # row_acquirer: the original shadowed the `acquirer` parameter, so
        # each row carries its own joined acquirer value — keep that behavior
        objects = [
            {
                "detailId": item.detailId,
                "clientCode": item.clientCode,
                "shortName": item.shortName,
                "storeCode": item.storeCode,
                "merchantCode": item.merchantCode,
                "text": item.text,
                "error": item.error,
                "statusCode": item.statusCode,
                "fileDate": item.fileDate,
                "createdAt": item.createdAt.isoformat()
                             if item.createdAt else item.createdAt,
                "endAt": item.endAt.isoformat() if item.endAt else item.endAt,
                "isDone": item.isDone,
                "isRunning": item.isRunning,
                "retrys": item.retrys,
                "webhookId": item.webhookId,
                "acquirer": row_acquirer,
            }
            for item, row_acquirer in result_page.object_list
        ]
        if not objects:
            # BUG FIX: bare `return` here returned None; callers expect the
            # (records, keys, meta) tuple, so return it with meta populated
            return records, keys, meta

        df = pd.DataFrame(objects)
        # NULL webhook ids become 0 so the column stays integer-typed
        df['webhookId'] = df['webhookId'].fillna(0).astype(int)
        keys = df.columns.tolist()
        records = df.to_dict(orient='records')
        print(records)
    except Exception as error:
        print(str(error))
    finally:
        self.session.close()
    return records, keys, meta
def getExam():
    """Return a filtered, sorted, paginated JSON listing of exams.

    Reads ``filter[0]``..``filter[4]`` from the query string (department,
    course, exam type, professor "first last", "year semester"), plus the
    sort and paging parameters consumed by ``sort_exams`` and ``pager``.
    """
    data = request.args
    exams_query = db_session.query(Exam)
    # collect all active filter conditions, then AND them together once —
    # replaces five copies of the has_filter/and_ accumulation pattern
    conditions = []

    def _contains(column, value):
        # substring match via SQL LIKE, as in the original filters
        return column.like("%" + value + "%")

    if data.get("filter[0]"):  # Fachbereich (department)
        exams_query = exams_query.join(Department, Exam.department)
        conditions.append(_contains(Department.name, data.get("filter[0]")))
    if data.get("filter[1]"):  # Fach (course)
        exams_query = exams_query.join(Course, Exam.course)
        conditions.append(_contains(Course.name, data.get("filter[1]")))
    if data.get("filter[2]"):  # Typ (exam type)
        exams_query = exams_query.join(ExamType, Exam.examType)
        conditions.append(_contains(ExamType.name, data.get("filter[2]")))
    if data.get("filter[3]"):  # Dozent (professor, matched as "first last")
        exams_query = exams_query.join(Professor, Exam.professor)
        conditions.append(_contains(
            func.concat(Professor.firstName, ' ', Professor.lastName),
            data.get("filter[3]")))
    if data.get("filter[4]"):  # Semester (matched as "year semester")
        conditions.append(_contains(
            func.concat(Exam.year, ' ', Exam.semester),
            data.get("filter[4]")))

    if conditions:
        # and_(*conds) is equivalent to the original nested and_() chaining
        exams_query = exams_query.filter(and_(*conditions))

    # sorting
    exams_query = sort_exams(exams_query, data)

    # pagination
    page, per_page = pager(data)
    ret = {"total_rows": exams_query.count(), "rows": []}
    for doc in to_serializable_dict(
            Paginator(exams_query, per_page).page(page).object_list):
        ret["rows"].append({
            "Fachbereich": doc["department"]["name"],
            "Fach": doc["course"]["name"],
            "Typ": doc["examType"]["name"],
            "Dozent": doc["professor"]["lastName"] + ", " +
                      doc["professor"]["firstName"],
            "Semester": ("%d " + doc["semester"]) % doc["year"],
            "ID": doc["idExam"]
        })
    return jsonify(ret)