def getleaderboard(self, current_userID):
    """Get users and fields needed for a leaderboard, preordered.

    Args:
        current_userID (str): The ID of the logged-in user.

    Returns:
        tuple: (leaderboard, current_user_info) where leaderboard is a list of
        dicts ordered by total value, each with:
            userID: ID of the user.
            username: Username of the user.
            sharesvalue: Total value of shares the user has.
            balance: Current balance for the user.
            totalvalue: Total share value plus user balance.
            ranking: Ranking of the user by totalvalue.
        current_user_info is the dict for the current user, or None if the
        current user did not appear in the results.
    """
    # Initialise session
    with self.sessionmanager() as session:
        # Query all the shares and user shares; IFNULL guards users with no shares
        query = session.query(
            User.userID,
            User.username,
            func.ifnull(
                func.sum(Usershare.quantity * Share.currentprice), 0
            ).label("sharesvalue"),
            User.balance,
            func.ifnull(
                func.sum(Usershare.quantity * Share.currentprice) + User.balance,
                User.balance
            ).label("totalvalue")
        )
        # Outer join with usershare and share tables so share-less users remain
        query = query.outerjoin(Usershare).outerjoin(Share)
        # One row per user
        query = query.group_by(User.userID)
        # Highest total first
        query = query.order_by(desc("totalvalue"))
        result = query.all()

        leaderboard = []
        # Bug fix: previously unbound when the current user was absent from the
        # results, which raised NameError at the return statement.
        current_user_info = None
        for ranking, row in enumerate(result, start=1):
            dictionary = {
                'userID': row.userID,
                'username': row.username,
                'sharesvalue': row.sharesvalue,
                'balance': row.balance,
                'totalvalue': row.totalvalue,
                'ranking': ranking}
            if row.userID == current_userID:
                current_user_info = dictionary
            leaderboard.append(dictionary)
        # Return leaderboard and the current user's entry (or None)
        return leaderboard, current_user_info
def save(self):
    """Persist this task's runtime state back to its DBTask row.

    Builds an UPDATE against DBTask keyed on sync_id, committing on success
    and rolling back (then re-raising as DBOperateError) on any failure.
    The session is always closed in the finally block.
    """
    session = DBInstance.session
    try:
        kwargs = {
            'sync_status': self.sync_status,
            'last_proc_time': datetime.now()
        }
        # Periodic task: reset status to 0 so the task can run again
        # (is_once == 'F' marks a recurring, not one-shot, task)
        if self.is_once == 'F':
            kwargs['sync_status'] = 0
        # Counters are incremented server-side: "DBTask.succ_num + n" renders
        # as a SQL expression, avoiding a read-modify-write race
        if self.succ_num:
            kwargs['succ_num'] = DBTask.succ_num + self.succ_num
        if self.fail_num:
            kwargs['fail_num'] = DBTask.fail_num + self.fail_num
        if self.log_info:
            # Discard historical log when log_new is set; otherwise append
            # to the existing log (IFNULL guards a NULL log_info column)
            if getattr(self, 'log_new', None):
                kwargs['log_info'] = self.log_info
            else:
                kwargs['log_info'] = func.ifnull(DBTask.log_info, '') + self.log_info
            kwargs['log_info'] += u'\n'
        stmt = update(DBTask).where(DBTask.sync_id == self.sync_id).values(
            **kwargs)
        session.execute(stmt)
        session.commit()
    except BaseException as e:
        session.rollback()
        raise DBOperateError(currentFuncName(self), e)
    finally:
        session.close()
def get_services(upper_engine_reading, lower_engine_reading, crane_id):
    """Return the services that are due for a crane, per engine.

    A service is due when its last-logged odometer plus its interval is at or
    below the current engine reading. An empty reading string skips that
    engine entirely.

    Args:
        upper_engine_reading: Current upper engine odometer reading ('' to skip).
        lower_engine_reading: Current lower engine odometer reading ('' to skip).
        crane_id: ID of the crane whose services are queried.

    Returns:
        List of dicts describing each due service and its most recent log
        entry (placeholder strings when no log record exists).
    """
    if (upper_engine_reading != ''):
        services_and_logs_upper = db.session.query(Service, ServiceLog).outerjoin(ServiceLog).\
            filter((func.ifnull(ServiceLog.service_log_lastupdate_odo, 0) + Service.service_interval) <= Markup.escape(upper_engine_reading)).\
            filter(Service.crane_id == crane_id).\
            filter(Service.engine_type == "upper")
    else:
        # Empty upper reading: produce an intentionally empty result
        # (filter(0==1)) so the union below is still well-formed.
        services_and_logs_upper = db.session.query(Service, ServiceLog).outerjoin(ServiceLog).filter(0 == 1)
    if (lower_engine_reading != ''):
        services_and_logs_lower = db.session.query(Service, ServiceLog).outerjoin(ServiceLog).\
            filter((func.ifnull(ServiceLog.service_log_lastupdate_odo, 0) + Service.service_interval) <= Markup.escape(lower_engine_reading)).\
            filter(Service.crane_id == crane_id).\
            filter(Service.engine_type == "lower")
    else:
        # Empty lower reading: same empty-result trick as for the upper engine.
        services_and_logs_lower = db.session.query(Service, ServiceLog).outerjoin(ServiceLog).filter(0 == 1)

    services_and_logs = services_and_logs_lower.union_all(services_and_logs_upper).order_by(Service.engine_type, Service.service_name)

    services = []
    for service, service_log in services_and_logs:
        # (Removed a dead `lastupdate` local that was computed but never used.)
        services.append(
            {"service_id": service.service_id,
             "service_name": service.service_name,
             "service_operation": service.service_operation,
             "service_interval": service.service_interval,
             "service_capacity_gallons": "N/A" if service.service_capacity_gallons == 0 else service.service_capacity_gallons,
             # Bug fix: the liters field previously fell back to the gallons value.
             "service_capacity_liters": "N/A" if service.service_capacity_liters == 0 else service.service_capacity_liters,
             "engine_type": service.engine_type.capitalize(),
             "service_lastupdate": "<no record>" if service_log is None else service_log.service_log_lastupdate.strftime("%B %d, %Y"),
             "service_lastupdate_odo": "<no record>" if service_log is None else service_log.service_log_lastupdate_odo})
    return services
def users_chart(self):
    """Count documents per responsible user.

    A document is attributed to the user who checked it, falling back to its
    creator when checked_by_user_id is NULL.

    Returns:
        dict: {'values': {user short name (or 'None'): document count}}
    """
    # Attribute each document to checker-or-creator.
    responsible = func.ifnull(Document.checked_by_user_id,
                              Document.created_by_user_id)
    # Bug fix: group by the same coalesced expression that is selected.
    # Grouping by created_by_user_id alone could emit several rows carrying
    # the same displayed user key (and silently merge them in the dict below).
    query = db.session.query(
        responsible,
        func.count(Document.id),
    ).group_by(responsible)
    rows = self.filter(query).all()
    users = dict((u.id, u.short_name())
                 for u in User.query.filter(User.id.in_(r[0] for r in rows)))
    return {
        'values': dict((users.get(r[0], 'None'), r[1]) for r in rows)
    }
def query_case_by_claim(company_name):
    """Build a DataFrame of net case reserves per claim for one company.

    Net case reserve = sum of 'set case reserve' transactions minus sum of
    'reduce case reserve' transactions, defaulting to 0 where absent.
    """
    session, connection = connect_company(company_name)

    # Claim -> policy mapping used as the driving table of the final join.
    claim_policy = session.query(Claim.claim_id, Claim.policy_id).subquery()

    # Total reserve set per claim.
    set_amounts = (
        session.query(
            ClaimTransaction.claim_id,
            func.sum(ClaimTransaction.transaction_amount).label('set'))
        .filter(ClaimTransaction.transaction_type == 'set case reserve')
        .group_by(ClaimTransaction.claim_id)
        .subquery())

    # Total reserve taken down per claim.
    takedown_amounts = (
        session.query(
            ClaimTransaction.claim_id,
            func.sum(ClaimTransaction.transaction_amount).label('takedown'))
        .filter(ClaimTransaction.transaction_type == 'reduce case reserve')
        .group_by(ClaimTransaction.claim_id)
        .subquery())

    # Net reserve = set - takedown (missing sides treated as 0).
    net_case = (
        session.query(
            set_amounts.c.claim_id,
            (func.ifnull(set_amounts.c.set, 0) -
             func.ifnull(takedown_amounts.c.takedown, 0)).label('case_reserve'))
        .outerjoin(takedown_amounts,
                   set_amounts.c.claim_id == takedown_amounts.c.claim_id)
        .subquery())

    # Attach the net reserve to every claim/policy pair, defaulting to 0.
    stmt = (
        session.query(
            claim_policy.c.claim_id,
            claim_policy.c.policy_id,
            func.ifnull(net_case.c.case_reserve, 0).label('case_reserve'))
        .outerjoin(net_case,
                   claim_policy.c.claim_id == net_case.c.claim_id)
        .statement)

    case_reserve = pd.read_sql(stmt, connection)
    connection.close()
    return case_reserve
def filter_players(query, page_size=None, page=None):
    """Search players by username or nickname, case-insensitively.

    Returns one page of matches (default 20 per page, first page), ordered by
    nickname with username as a fallback, with the related user eager-loaded.
    """
    page_size = 20 if page_size is None else page_size
    page = 0 if page is None else page

    pattern = '%%%s%%' % query
    matches = Player.query.filter(
        or_(Player.username.ilike(pattern),
            Player.nickname.ilike(pattern)))
    matches = matches.options(joinedload(Player.user))
    matches = matches.order_by(func.ifnull(Player.nickname, Player.username))
    matches = matches.limit(page_size).offset(page * page_size)
    return list(matches)
def list_project_view():
    """Render the project list, including the total booked hours per project.

    Projects with no booked hours still appear (outer join) with a sum of 0.
    """
    projects = (db.session.query(
        Project.id,
        Project.name,
        Project.shortcut,
        Project.phase,
        Project.start_date,
        Project.end_date,
        Project.active,
        # Bug fix: the IFNULL default was the string "0"; use numeric 0 so
        # the "sum" column stays numeric for projects without hours.
        func.ifnull(func.sum(Hour.amount), 0).label("sum"),
    ).outerjoin(Hour, Project.shortcut == Hour.project_shortcut).group_by(
        Project.id).all())
    return render_template("project_list.html", projects=projects, user=current_user)
def _get_patients(payment_min, payment_max):
    """Return serialized patients with their total payment amounts.

    Args:
        payment_min: Lower bound on individual payment amounts, or None.
        payment_max: Upper bound on individual payment amounts, or None.

    Returns:
        Serialized patient data (via getPatientsSchema) with each patient's
        total payment amount, 0.0 where no payments match.
    """
    stmt = db.session.query(Payment.patient_id,
                            func.sum(Payment.amount).label('total_amount'))
    # Bug fix: `if payment_min:` skipped the filter for a legitimate bound of
    # 0 (falsy); test explicitly against None instead.
    if payment_min is not None:
        stmt = stmt.filter(Payment.amount >= payment_min)
    if payment_max is not None:
        stmt = stmt.filter(Payment.amount <= payment_max)
    stmt = stmt.group_by(Payment.patient_id).subquery()

    # Outer join keeps patients with no matching payments (total 0.0).
    query = db.session.query(Patient, func.ifnull(stmt.c.total_amount, 0.0)). \
        outerjoin(stmt, Patient.id == stmt.c.patient_id)
    patients_tuple = query.all()
    # Materialize the lazy map() so the schema receives a concrete list.
    patients_list = [_sql_tuple_to_patients(t) for t in patients_tuple]
    patients_json = getPatientsSchema.dump(patients_list)
    return patients_json
def filter_players(query, page_size=None, page=None):
    """Case-insensitive player search over username/nickname.

    Eager-loads the related user, orders by nickname (username when the
    nickname is NULL), and returns the requested page as a list.
    """
    if page_size is None:
        page_size = 20
    if page is None:
        page = 0

    like_expr = '%%%s%%' % query
    page_query = (
        Player.query
        .filter(or_(Player.username.ilike(like_expr),
                    Player.nickname.ilike(like_expr)))
        .options(joinedload(Player.user))
        .order_by(func.ifnull(Player.nickname, Player.username))
        .limit(page_size)
        .offset(page * page_size))
    return list(page_query)
def query_paid_by_claim(company_name):
    """Build a DataFrame of paid losses per claim for one company.

    Paid loss is the sum of 'claim payment' transactions per claim,
    defaulting to 0 for claims with no payments.
    """
    session, connection = connect_company(company_name)

    # Claim -> policy mapping drives the final outer join.
    claim_policy = session.query(Claim.claim_id, Claim.policy_id).subquery()

    # Total payments per claim.
    payments = (
        session.query(
            ClaimTransaction.claim_id,
            func.sum(ClaimTransaction.transaction_amount).label('paid_loss'))
        .filter(ClaimTransaction.transaction_type == 'claim payment')
        .group_by(ClaimTransaction.claim_id)
        .subquery())

    # Every claim appears, with 0 paid loss when nothing was paid.
    stmt = (
        session.query(
            claim_policy.c.claim_id,
            claim_policy.c.policy_id,
            func.ifnull(payments.c.paid_loss, 0).label('paid_loss'))
        .outerjoin(payments,
                   claim_policy.c.claim_id == payments.c.claim_id)
        .statement)

    claim_payments = pd.read_sql(stmt, connection)
    connection.close()
    return claim_payments
def get_workers():
    """Query dept-10 employees who are managers or whose pay exceeds 3000.

    Pay is salary plus commission, with a NULL commission treated as 0.
    Returns (ename, pay) rows.
    """
    from sqlalchemy.sql import func, and_, or_

    # Salary plus commission, NULL commission counted as zero.
    total_pay = Emp.sal + func.ifnull(Emp.comm, 0)
    return session.query(Emp.ename, total_pay).filter(
        and_(Emp.deptno == 10),
        or_(Emp.job == 'MANAGER', total_pay > 3000))
def get_annual_income():
    """Query each employee's name with salary plus commission.

    A NULL commission is treated as 0.

    NOTE(review): despite the name, no *12 annualization is applied here —
    confirm whether sal already stores an annual figure.
    """
    from sqlalchemy.sql import func

    income = Emp.sal + func.ifnull(Emp.comm, 0)
    return session.query(Emp.ename, income)
def send_json():
    '''Serve the public publications list as JSON, applying URL filters.

    Supported query-string parameters (all optional, combinable):
        year - filter by year: year=2016 or year=2016,2018,..
        type - filter by type: type=techreport or type=article.
            Possible types are: Incollection Inbook Inproceedings Article
            PhDThesis Book "" Misc Proceedings Conference Techreport <null>
        author - filter by one or many author IDs: author=703 or author=703,709
        category - filter by category ID(s): category=62 or category=62,72
            (NOTE(review): the docstring originally said "categories", but the
            code below reads filters['category'] — confirm which spelling the
            frontend sends.)
        kops - if kops=true, only publications listed in kops are returned.
            The DB contains both <null> and "" values for publications that
            are NOT listed in kops, so both are handled below.
        keywords - filter by keyword ID(s); a publication must use ALL of the
            given keywords (see pubIDusesAllKeywords).

    The response shape is {"publications": {index: {...}}, "file_dir": ...};
    author and document/category fields are transported as GROUP_CONCAT
    strings and re-split in Python.
    '''
    # MySQL's group_concat_max_len default is 1024 — too small for this query,
    # which packs whole author/document/category lists into single strings.
    db.session.execute("SET SESSION group_concat_max_len = 1000000")

    # Category rows joined to their publications; used twice below (columns
    # in the SELECT list and join target), hence a named subquery.
    cat_pub_subq = db.session.query(Categories.id.label("cat_id"),
                                    Categories.name.label("cat_name"),
                                    Categories.parent_id,
                                    Categories_publications.publication_id) \
        .join(Categories_publications, Categories.id == Categories_publications.category_id) \
        .join(Publications, Categories_publications.publication_id == Publications.id) \
        .subquery()
    '''
    You can dynamically construct the "OR" and "AND" --> see .filter in the
    query. Because of this feature one can define generic filters.
    '''
    filters = request.args.to_dict()

    # Each result row r is: (Publications instance, author-id CSV, forename
    # CSV, surname CSV, cleanname CSV, then document and category fields as
    # ';'-separated strings). The comprehension re-splits those strings.
    result = {
        "publications": {i: {**r[0].to_dict(),
                             'authors': uniquifySorted([
                                 {'id': a,
                                  'forename': r[2].split(',')[j],
                                  'surname': r[3].split(',')[j],
                                  'cleanname': r[4].split(',')[j]
                                  } for j, a in enumerate(r[1].split(','))]),
                             'documents': checkForEmtyDocument(r),
                             'categories': ceckForEmtyCategories(r)
                             }
                         for i, r in enumerate(db.session.query(
            Publications,
            # .op("ORDER BY") / .op("SEPARATOR") abuse SQLAlchemy's generic
            # operator hook to inject raw GROUP_CONCAT modifiers; IFNULL keeps
            # NULL fields from collapsing the whole concatenation to NULL.
            func.group_concat(func.ifnull(Authors.id, '').op("ORDER BY")(Authors_publications.position)),
            func.group_concat(func.ifnull(Authors.forename, '').op("ORDER BY")(Authors_publications.position)),
            func.group_concat(func.ifnull(Authors.surname, '').op("ORDER BY")(Authors_publications.position)),
            func.group_concat(func.ifnull(Authors.cleanname, '').op("ORDER BY")(Authors_publications.position)),
            func.group_concat(func.ifnull(Documents.id, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(Documents.publication_id, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(Documents.visible, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(Documents.remote, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(Documents.filename, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(cat_pub_subq.c.cat_id, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(cat_pub_subq.c.cat_name, '').op("SEPARATOR")(literal_column('\';\''))),
            func.group_concat(func.ifnull(cat_pub_subq.c.parent_id, '').op("SEPARATOR")(literal_column('\';\''))),
        ) \
            .outerjoin(Documents, Publications.id == Documents.publication_id) \
            .outerjoin(cat_pub_subq, Publications.id == cat_pub_subq.c.publication_id) \
            # Implicit inner join of publications to their authors.
            .filter(Publications.id == Authors_publications.publication_id) \
            .filter(Authors.id == Authors_publications.author_id) \
            # Each filter/having degrades to true() when its parameter is
            # absent, so the chain works for any filter combination.
            .filter(Publications.year.in_(filters['year'].split(',')) if
                    'year' in filters else true()) \
            .filter(Publications.type.in_([filters['type']]) if 'type' in filters else true()) \
            # Author/category matching runs on the GROUP_CONCAT strings via a
            # regexp anchored on commas, hence HAVING instead of WHERE.
            .having(or_((func.group_concat(Authors.id).op('regexp')('(^|,)' + str(a) + '(,|$)') for a in filters['author'].split(',')) if 'author' in filters else true())) \
            .having(and_((func.group_concat(cat_pub_subq.c.cat_id).op('regexp')('(^|,)' + str(a) + '(,|$)') for a in filters['category'].split(',')) if 'category' in filters else true())) \
            # kops=true -> only rows with a non-empty, non-NULL kops value;
            # any other kops value -> only rows with NULL or "" kops.
            .filter((and_(Publications.kops != "", Publications.kops != None) if filters['kops'] == 'true' else or_(Publications.kops == None, Publications.kops == "")) if 'kops' in filters else true()) \
            .filter((Publications.id.in_(pubIDusesAllKeywords(filters['keywords']))) if 'keywords' in filters else true()) \
            .filter(Publications.public == 1) \
            .group_by(Publications.id) \
            .order_by(Publications.year.desc(), Publications.id.desc())
            .all())},
        "file_dir": "uploadedFiles"
    }
    return jsonify(result)