def where_from_query_question(cls, rp):
    """Build a list of SQLAlchemy filter clauses for a question search.

    :param rp: request-parameter mapping; only its 'query' key is read.
    :return: list of clauses (one AND-group per query type) for .filter().
    """
    where = []
    query = rp.get('query', '').strip().lower()
    if not query:
        return where
    # get_strict_query presumably parses "type:value" tokens — TODO confirm.
    strict_queries = get_strict_query(query)
    if len(query.split(' ')) > 1 and not strict_queries:
        # Rewrite a multi-word free-text query as comma-joined "query:word" tokens.
        # NOTE(review): this rewritten string is never re-parsed into
        # strict_queries below — verify that is intentional.
        query = ','.join(['query:%s' % q for q in query.split(' ') if q.strip()])
    # NOTE(review): str.split(',') never returns an empty list, so this
    # early return looks unreachable.
    if not query.split(','):
        return where
    # dict.iteritems() is Python 2 only.
    for type_name, type_queries in strict_queries.iteritems():
        same_type = []
        for query in type_queries:  # NOTE(review): shadows the outer `query`
            if type_name == 'tags':
                same_type.append(or_(Questions.tags.any(func.lower(Tag.text) == query)))
            elif type_name == 'users':
                same_type.append(Questions.user_id == query)
            elif type_name == 'query':
                # Free-text: match the question body or any answer body.
                different_type = [cls.text.ilike('%' + query + '%'), cls.answers.any(Answers.text.ilike('%' + query + '%'))]
                same_type.append(or_(*different_type))
        where.append(and_(*same_type))
    return where
def refresh_group_list(self, type, offset_apid, u_id, group):
    """Fetch the next page (up to 6) of open appointments for a region group.

    :param type: appointment type (APtype) to filter on.
    :param offset_apid: only appointments with APid below this are returned.
    :param u_id: requesting user id, forwarded to the serializer.
    :param group: region-group code; the `group == 0` case has no body here —
        presumably handled by the caller, TODO confirm.
    Results and a status code are written into self.retjson.
    """
    retdata = []
    if int(group) != 0:
        try:
            # attention: a freshly re-ordered result set is returned
            appointments = self.db.query(Appointment). \
                filter(Appointment.APtype == type, Appointment.APclosed == 0, Appointment.APvalid == 1,
                       Appointment.APstatus == 0,
                       or_(Appointment.APgroup.like("{}%".format(group)), Appointment.APgroup.like("%{}".format(group))),
                       Appointment.APid < offset_apid).from_self().order_by(desc(Appointment.APcreateT)). \
                limit(6).all()
            if appointments:
                APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                self.retjson['code'] = '10253'  # refresh succeeded, 6 items returned
                self.retjson['contents'] = retdata
            else:
                # NOTE(review): `appointments` is a list here, so .first()
                # raises AttributeError, which drops into the handler below.
                print appointments.first().APtype
        except Exception, e:  # fewer than 6 appointments left: return all remaining ones
            print e
            try:
                appointments = self.db.query(Appointment). \
                    filter(Appointment.APtype == type, Appointment.APclosed == 0, Appointment.APvalid == 1,
                           Appointment.APstatus == 0,
                           or_(Appointment.APgroup.like("{}%".format(group)), Appointment.APgroup.like("%{}".format(group))),
                           Appointment.APid < offset_apid).order_by(desc(Appointment.APcreateT)). \
                    all()
                if appointments:
                    APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                    self.retjson['code'] = '10263'  # fewer than 6 left: return all remaining appointments
                    self.retjson['contents'] = retdata
                else:
                    self.retjson['code'] = '10262'
                    self.retjson['contents'] = r"没有更多约拍"
            except Exception, e:
                self.retjson['code'] = '10262'
                self.retjson['contents'] = r"没有更多约拍"
def get_advanced_search_query(employer_id, params, status):
    """Compose the candidate advanced-search query.

    :param employer_id: current employer id (or falsy); used to hide the
        employer's own current employees and to gate anonymous-name search.
    :param params: mapping with optional keys skills/locations/role/name/salary.
    :param status: required Candidate.status value.
    :return: query yielding distinct Candidate.id rows (grouped).
    """
    skills = params.get('skills')
    locations = params.get('locations')
    role = params.get('role')
    name = params.get('name')
    salary = params.get('salary')
    query = DBSession.query(Candidate.id).filter(Candidate.status == status)
    if employer_id:
        # Anti-join: drop candidates currently employed by this employer.
        query = query.outerjoin(V_CANDIDATE_CURRENT_EMPLOYERS,
                                and_(V_CANDIDATE_CURRENT_EMPLOYERS.c.candidate_id == Candidate.id,
                                     V_CANDIDATE_CURRENT_EMPLOYERS.c.employer_id == employer_id)) \
            .filter(V_CANDIDATE_CURRENT_EMPLOYERS.c.candidate_id == None)
    if locations:
        query = query.join(PreferredLocation, Candidate.id == PreferredLocation.candidate_id)
        country_filter = set([c['country_iso'] for c in locations])
        city_filter = [and_(City.name == loc['city'], City.country_iso == loc['country_iso']) for loc in locations]
        city_ids = DBSession.query(City.id).filter(or_(*city_filter)).all()
        # Match either an exact preferred city or any preferred country.
        query = query.filter(or_(PreferredLocation.city_id.in_(city_ids),
                                 PreferredLocation.country_iso.in_(country_filter)))
    if salary or role:
        query = query.join(TargetPosition)
    if salary:
        query = query.filter(TargetPosition.minimum_salary <= salary)
    if role:
        role = get_by_name_or_raise(Role, role)
        query = query.filter(TargetPosition.role_id == role.id)
    if name and employer_id:
        name = name.lower()
        # Aggregate the employer ids of accepted offers per candidate.
        employer_ids = func.array_agg(Offer.employer_id, type_=ARRAY(TEXT)).label('employer_ids')
        offer_query = DBSession.query(Offer.candidate_id, employer_ids).filter(Offer.accepted != None) \
            .group_by(Offer.candidate_id).subquery()
        # A name hit is visible when the candidate is not anonymous or has an
        # accepted offer with this employer; a raw id prefix always matches.
        query = query.outerjoin(offer_query, offer_query.c.candidate_id == Candidate.id).filter(
            or_(cast(Candidate.id, TEXT).startswith(name),
                and_(
                    or_(func.lower(Candidate.first_name).startswith(name),
                        func.lower(Candidate.last_name).startswith(name)),
                    or_(
                        offer_query.c.employer_ids.any(str(employer_id)),
                        Candidate.anonymous == False
                    )
                )
                )
        )
    query = query.group_by(Candidate.id)
    if skills:
        # Require *every* requested skill (count must equal len(skills)).
        query = query.join(CandidateSkill).join(Skill).filter(Skill.name.in_(skills)) \
            .having(func.count(Skill.name) == len(skills))
    return query
def post(self):
    """Handle appointment-list requests (request types '10231'/'10235').

    Validates the user, then writes up to 6 newest open appointments —
    photographer posts for '10231' (APtype 1), model posts for '10235'
    (APtype 0), optionally filtered by region group — into self.retjson.
    """
    u_auth_key = self.get_argument('authkey')
    request_type = self.get_argument('type')
    u_id = self.get_argument('uid')
    ap_group = self.get_argument('group')
    groupid = int(ap_group)
    ufuncs = Userinfo.Ufuncs.Ufuncs()
    if ufuncs.judge_user_valid(u_id, u_auth_key):
        if request_type == '10231':  # request the 6 newest open photographer appointments for the chosen region
            retdata = []
            try:
                if int(ap_group) == 0:
                    appointments = self.db.query(Appointment). \
                        filter(Appointment.APtype == 1, Appointment.APclosed == 0, Appointment.APvalid == 1, Appointment.APstatus == 0).\
                        order_by(desc(Appointment.APid)).limit(6).all()
                    APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                    self.retjson['code'] = '10251'
                    self.retjson['contents'] = retdata
                else:
                    appointments = self.db.query(Appointment). \
                        filter(Appointment.APtype == 1, Appointment.APclosed == 0, Appointment.APvalid == 1, Appointment.APstatus == 0,
                               or_(Appointment.APgroup.like("{}%".format(groupid)), Appointment.APgroup.like("%{}".format(groupid)))). \
                        order_by(desc(Appointment.APid)).limit(6).all()
                    APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                    self.retjson['code'] = '10251'
                    self.retjson['contents'] = retdata
            except Exception, e:  # no appointments found
                print e
                self.no_result_found(e)
        elif request_type == '10235':  # request open model appointments for the chosen region
            retdata = []
            try:
                if int(ap_group) == 0:
                    appointments = self.db.query(Appointment). \
                        filter(Appointment.APtype == 0, Appointment.APclosed == 0, Appointment.APvalid == 1, Appointment.APstatus == 0, ).\
                        order_by(desc(Appointment.APid)).limit(6).all()
                    APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                    self.retjson['code'] = '10252'
                    self.retjson['contents'] = retdata
                else:
                    appointments = self.db.query(Appointment). \
                        filter(Appointment.APtype == 0, Appointment.APclosed == 0, Appointment.APvalid == 1, Appointment.APstatus == 0,
                               or_(Appointment.APgroup.like("{}%".format(groupid)), Appointment.APgroup.like("%{}".format(groupid)))). \
                        order_by(desc(Appointment.APid)).limit(6).all()
                    APmodelHandler.ap_Model_simply(appointments, retdata, u_id)
                    self.retjson['code'] = '10252'
                    self.retjson['contents'] = retdata
            except Exception, e:
                self.no_result_found(e)
def query(keyword):
    """■ Search AUers records whose any text column fuzzily matches *keyword*.

    :param keyword: substring matched (SQL LIKE, record by record, column by
        column) against every listed AUers column.
    :return: list of matching AUers objects.
    """
    from sqlalchemy.sql.elements import or_
    SessionCls = sessionmaker(bind=engine)
    session = SessionCls()
    try:
        key_for_sql_query = "%" + keyword + "%"
        query_boundary = or_(AUers.Name.like(key_for_sql_query),
                             AUers.Gender.like(key_for_sql_query),
                             AUers.Mobile.like(key_for_sql_query),
                             AUers.QQ.like(key_for_sql_query),
                             AUers.Grade.like(key_for_sql_query),
                             AUers.Faculty.like(key_for_sql_query),
                             AUers.Class.like(key_for_sql_query),
                             AUers.DormBuild.like(key_for_sql_query),
                             AUers.Department.like(key_for_sql_query),
                             AUers.Group.like(key_for_sql_query),
                             AUers.Occupation.like(key_for_sql_query),
                             AUers.AUID.like(key_for_sql_query),
                             AUers.Birthday.like(key_for_sql_query),
                             AUers.JoinTime.like(key_for_sql_query))
        results = session.query(AUers).filter(query_boundary).all()
        return results
    finally:
        # Fix: the session was previously never closed, leaking the pooled
        # connection on every call.
        session.close()
def apply_single_amazon_mapping(mapping_id):
    """Create journal entries for unmapped Amazon items matching one mapping.

    Items already referenced by a JournalEntries.transaction_id are skipped;
    the mapping keyword is matched case-insensitively against title/category.

    :raises Exception: when a matched item has a non-positive item_total.
    """
    mapping = db.session.query(Mappings).filter(Mappings.id == mapping_id).one()
    # NOTE(review): str(AmazonItems.id) stringifies the *column expression*,
    # not a row value — confirm this join condition is intended.
    matches = (db.session.query(AmazonItems)
               .outerjoin(JournalEntries, JournalEntries.transaction_id == str(AmazonItems.id))
               .filter(JournalEntries.transaction_id.is_(None))
               .filter(or_(func.lower(AmazonItems.title).like('%' + mapping.keyword.lower() + '%'),
                           func.lower(AmazonItems.category_id).like('%' + mapping.keyword.lower() + '%')))
               .order_by(AmazonItems.shipment_date.desc()).all())
    for match in matches:
        new_journal_entry = JournalEntries()
        new_journal_entry.transaction_id = match.id
        new_journal_entry.mapping_id = mapping_id
        new_journal_entry.transaction_source = 'amazon'
        new_journal_entry.timestamp = match.shipment_date
        if match.item_total > 0:
            new_journal_entry.debit_subaccount = mapping.positive_debit_subaccount_id
            new_journal_entry.credit_subaccount = mapping.positive_credit_subaccount_id
        else:
            # Non-positive totals are unsupported; fail loudly rather than
            # book an unbalanced entry.
            raise Exception()
        new_journal_entry.functional_amount = match.item_total
        new_journal_entry.functional_currency = 'USD'
        new_journal_entry.source_amount = match.item_total
        new_journal_entry.source_currency = 'USD'
        db.session.add(new_journal_entry)
        db.session.commit()
def apply_single_amazon_mapping(mapping_id):
    """Create journal entries for unmapped Amazon items matching one mapping.

    NOTE(review): this file contains another definition of
    apply_single_amazon_mapping with identical logic; at import time the
    later definition wins — one of the two should be removed.

    :raises Exception: when a matched item has a non-positive item_total.
    """
    mapping = db.session.query(Mappings).filter(
        Mappings.id == mapping_id).one()
    # NOTE(review): str(AmazonItems.id) stringifies the *column expression*,
    # not a row value — confirm this join condition is intended.
    matches = (db.session.query(AmazonItems).outerjoin(
        JournalEntries,
        JournalEntries.transaction_id == str(AmazonItems.id)).filter(
        JournalEntries.transaction_id.is_(None)).filter(
        or_(
            func.lower(
                AmazonItems.title).like('%' + mapping.keyword.lower() + '%'),
            func.lower(AmazonItems.category_id).like(
                '%' + mapping.keyword.lower() + '%'))).order_by(
        AmazonItems.shipment_date.desc()).all())
    for match in matches:
        new_journal_entry = JournalEntries()
        new_journal_entry.transaction_id = match.id
        new_journal_entry.mapping_id = mapping_id
        new_journal_entry.transaction_source = 'amazon'
        new_journal_entry.timestamp = match.shipment_date
        if match.item_total > 0:
            new_journal_entry.debit_subaccount = mapping.positive_debit_subaccount_id
            new_journal_entry.credit_subaccount = mapping.positive_credit_subaccount_id
        else:
            # Non-positive totals are unsupported; fail loudly.
            raise Exception()
        new_journal_entry.functional_amount = match.item_total
        new_journal_entry.functional_currency = 'USD'
        new_journal_entry.source_amount = match.item_total
        new_journal_entry.source_currency = 'USD'
        db.session.add(new_journal_entry)
        db.session.commit()
def apply_all_mappings():
    """Run every mapping over unmapped Amazon items, creating journal entries.

    Missing positive-debit subaccounts are auto-created under
    'Discretionary Costs' before the entry is written.

    :raises Exception: when a matched item has a non-positive amount.
    """
    for mapping in db.session.query(Mappings).all():
        matches = (db.session.query(AmazonItems)
                   .outerjoin(JournalEntries, JournalEntries.transaction_id == str(AmazonItems.id))
                   .filter(JournalEntries.transaction_id.is_(None))
                   .filter(or_(func.lower(AmazonItems.title).like('%' + mapping.keyword.lower() + '%'),
                               func.lower(AmazonItems.category_id).like('%' + mapping.keyword.lower() + '%')))
                   .order_by(AmazonItems.shipment_date.desc()).all())
        for match in matches:
            new_journal_entry = JournalEntries()
            new_journal_entry.transaction_id = match.id
            # NOTE(review): unlike apply_single_amazon_mapping, mapping_id is
            # never set on the entry — confirm this is intentional.
            new_journal_entry.transaction_source = 'amazon'
            new_journal_entry.timestamp = match.shipment_date
            # NOTE(review): the sign check uses match.amount but the amounts
            # booked below use match.item_total — confirm which field is right.
            if match.amount > 0:
                try:
                    db.session.query(Subaccounts).filter(Subaccounts.name == mapping.positive_debit_subaccount_id).one()
                except NoResultFound:
                    # Auto-create the debit subaccount on first use.
                    new_subaccount = Subaccounts()
                    new_subaccount.name = mapping.positive_debit_subaccount_id
                    new_subaccount.parent = 'Discretionary Costs'
                    db.session.add(new_subaccount)
                    db.session.commit()
                new_journal_entry.debit_subaccount = mapping.positive_debit_subaccount_id
                new_journal_entry.credit_subaccount = mapping.positive_credit_subaccount_id
            else:
                # Non-positive amounts are unsupported; fail loudly.
                raise Exception()
            new_journal_entry.functional_amount = match.item_total
            new_journal_entry.functional_currency = 'USD'
            new_journal_entry.source_amount = match.item_total
            new_journal_entry.source_currency = 'USD'
            db.session.add(new_journal_entry)
            db.session.commit()
async def get_ideas(
    page: int = 0,
    order: GetIdeasOrder = GetIdeasOrder.POPULAR,
    tag: str = '',
    search: str = '',
    db: Session = Depends(get_db),
):
    """List ideas with optional tag filter, text search, ordering and paging.

    :param page: zero-based page index; must not be negative.
    :param order: sort order (POPULAR by likes, OLDEST/RECENT by creation).
    :param tag: lowercase tag to filter by; '' means no tag filter.
    :param search: substring matched against title or description.
    :raises HTTPException: 400 when page is negative.
    """
    # Fix: validate the page number before doing any query work.
    if page < 0:
        raise HTTPException(status_code=400, detail='Page could not be less than 1')
    query = db.query(Idea)
    if tag != '':
        query = query.join(IdeasTagsTable).filter(
            IdeasTagsTable.columns.tags_value == tag.lower())
    if order == GetIdeasOrder.POPULAR:
        query = query.order_by(desc(Idea.likes))
    if order == GetIdeasOrder.RISING:
        pass  # RISING ordering is not implemented yet
    if order == GetIdeasOrder.OLDEST:
        query = query.order_by(Idea.created)
    if order == GetIdeasOrder.RECENT:
        query = query.order_by(desc(Idea.created))
    if search != '':
        query = query.filter(
            or_(Idea.title.contains(search),
                Idea.description.contains(search)))
    return query.offset(page * PAGE_SIZE).limit(PAGE_SIZE).all()
def get_users_with_filters(*filters):
    """Return enabled primary-founder users matching any of *filters*.

    :param filters: one or more SQLAlchemy clauses, OR-ed together when
        there is more than one.
        NOTE(review): calling with zero filters raises IndexError — confirm
        callers always pass at least one.
    :return: list of User objects.
    """
    # Base restriction: login enabled and holding the primary founder role.
    # (`== True` is intentional: these build SQL expressions.)
    general_filters = [User.login_enabled == True,
                       User.roles_rel.any(
                           and_(RoleXUser.role_name == RolesTypes.founder,
                                RoleXUser.is_primary == True))]
    notif_filters = filters[0]
    if len(filters) > 1:
        notif_filters = or_(*filters)
    return DBSession.query(User).filter(*(general_filters + [notif_filters])).all()
def user_has_read_permission(cls, user: User):
    """Check if user can read the file (as SQL expression)"""
    # Anonymous callers may only see public files.
    if not user:
        return not_(cls.is_private)
    # Admins see everything; and_(True) yields an always-true clause.
    if user.is_admin:
        return and_(True)
    # Regular users: public files, plus their own private ones.
    return or_(not_(cls.is_private), cls.owner_id == user.id)
def _to_bool_clause(constraint):
    """Convert a constraint spec into a SQLAlchemy boolean clause.

    None means no filtering (empty text()); a dict is an implicit
    conjunction of column == value tests; anything else is treated as
    disjunctive normal form (OR over AND-ed predicate groups).
    """
    if constraint is None:
        return text('')
    if isinstance(constraint, dict):
        return and_(column(name) == value for name, value in constraint.items())
    groups = (
        and_(_convert_predicate(pred) for pred in group)
        for group in constraint
    )
    return or_(groups)
def employee_data(request):
    """Return an employee's profile, the actions allowed for their role, and
    other employees whose names overlap with theirs (possible duplicates).
    """
    employee_id = request.args.get('employee_id')
    role_actions = actions_data[staff_role_check(employee_id)]
    actions_keys = role_actions.keys()
    actions_values = role_actions.values()
    actions = []
    for action_id, action in zip(actions_keys, actions_values):
        actions.append(dict(id=action_id, action=action))
    session = create_db_session()
    employee_data = session.query(Employee).filter(
        Employee.id == int(employee_id)).first()
    employee_data = employee_schema1.dump(employee_data)
    # Similar employees: any name column equal (LIKE without wildcards) to
    # this employee's last/other/first name, excluding the employee themself.
    result = session.query(Employee).filter(
        or_(Employee.first_name.like(employee_data['last_name']),
            Employee.last_name.like(employee_data['last_name']),
            Employee.other_names.like(employee_data['last_name'])),
        or_(
            or_(Employee.first_name.like(employee_data['other_names']),
                Employee.last_name.like(employee_data['other_names']),
                Employee.other_names.like(employee_data['other_names'])),
            or_(Employee.first_name.like(employee_data['first_name']),
                Employee.last_name.like(employee_data['first_name']),
                Employee.other_names.like(
                    employee_data['first_name'])))).filter(
        Employee.id != int(employee_id)).all()
    # print(str(result))
    # result=result.all()
    similar_employees = employee_schema.dump(result)
    session.close()
    response = {
        "role_actions": actions,
        "similar_employees": similar_employees
    }
    return render_response(response)
def delete_rows(engine, relation, rows):
    """Delete *rows* from *relation*, batched to bound statement size.

    Each row is a mapping of column name -> value identifying one record
    via equality on every listed column.
    """
    pending = list(rows)
    if not pending:
        return
    chunk_size = cfg.settings.get('row_delete_chunk_size', 60)
    for batch in list(chunks(pending, chunk_size)):
        # One DELETE per batch: OR of per-row AND-ed column equalities.
        predicate = or_(
            and_(column(name) == row[name] for name in row)
            for row in batch
        )
        statement = delete(table(relation['name'])).where(predicate)
        _execute(engine, statement)
def getAuthLocations(token, session=None):
    """Return ids of locations the token's user may access: locations they
    own, plus any location with at least one published event.
    """
    user_id = Token.getUserId(token)
    session = session or DBSession()
    accessible = or_(
        Location.owner_id == user_id,
        Location.events.any(Event.is_published),
    )
    rows = session.query(Location.id).filter(accessible).all()
    return tuple_to_list(rows)
def delete_rows(engine, relation, rows):
    """Delete *rows* from *relation*, batched to bound statement size.

    NOTE(review): this file contains another identically-named definition of
    delete_rows with the same logic; the later one wins at import time.

    :param rows: iterable of dicts mapping column name -> value; each dict
        identifies one record via equality on every listed column.
    """
    _rows = list(rows)
    if not _rows:
        return
    row_chunks = list(
        chunks(_rows, cfg.settings.get('row_delete_chunk_size', 60)))
    for chunk in row_chunks:
        # One DELETE per chunk: OR of per-row AND-ed column equalities.
        whereclause = or_(
            and_(column(k) == row[k] for k in row) for row in chunk)
        del_query = delete(table(relation['name'])).where(whereclause)
        _execute(engine, del_query)
def _to_bool_clause(constraint):
    """Convert a constraint spec into a SQLAlchemy boolean clause.

    NOTE(review): this file contains another identically-named definition of
    _to_bool_clause with the same logic; the later one wins at import time.

    :param constraint: None (no filtering — empty text()), a dict
        (AND of column == value), or an iterable of predicate groups in
        disjunctive normal form (OR over AND-ed groups).
    """
    if constraint is not None:
        if isinstance(constraint, dict):
            return and_(column(k) == v for k, v in constraint.items())
        else:
            # DNF: OR over groups, AND within each group.
            return or_(
                and_(
                    _convert_predicate(predicate)
                    for predicate in conjunction_clause)
                for conjunction_clause in constraint)
    else:
        return text('')
def get_cn_ladder(listpage=None):
    """Return a page of the CN/TW ladder, ordered by rank.

    :param listpage: None for the top 100; otherwise page N of 20 entries
        starting after the first 100.
    :return: list of row dicts (via to_dict).
    """
    if listpage is None:
        startindex = 0
        endindex = 100
    else:
        startindex = 100 + (listpage - 1) * 20
        endindex = startindex + 20
    # Fix: page in SQL with slice() (LIMIT/OFFSET) instead of fetching every
    # ladder row and slicing the materialized list in Python.
    result = db.session.query(
        Summary.player_name, Summary.rank, Summary.player_team_short_name,
        Summary.player_place, Summary.link, Summary.game_id, Summary.tier,
        Summary.lp, Summary.mmr, Summary.total_win, Summary.total_lose,
        Summary.total_win_ratio, Summary.twentyavgck, Summary.twentyavgkda,
        Summary.twentywinratio
    ).filter(
        or_(Summary.player_country == 'CN', Summary.player_country == 'TW')
    ).order_by(Summary.rank).slice(startindex, endindex).all()
    data = to_dict(result)
    return data
def insert_case_with_tagnames(self, name, content, tag_names_list, add_tag_names_list=None):
    """Insert a case, resolving existing tags by name and creating new ones."""
    case = Case()
    case.name = name
    case.content = content
    tags = []
    if tag_names_list:
        # Look up all pre-existing tags in a single OR query.
        name_match = or_(*(Tag.name == tag_name for tag_name in tag_names_list))
        tags = DB.session.query(Tag).filter(name_match).all()
    for tag_name in (add_tag_names_list or []):
        # Brand-new tags are created inline with an empty description.
        tags.append(Tag(name=tag_name, description=''))
    return self.insert_case_with_tags(name, content, tags)
def getAuthEvents(token, session=None, include_past=False):
    """Return ids of events the token's user may access: events they own,
    plus all published events.
    """
    user_id = Token.getUserId(token)
    session = session or DBSession()
    visible = or_(Event.owner_id == user_id, Event.is_published)
    auth_events = session.query(Event.id).filter(visible)
    # The filtration below has been removed because it's handled in the controllers.
    # if include_past:
    #     auth_events = auth_events.filter(Event.is_past)
    return tuple_to_list(auth_events.all())
def register():
    """Registration view: on POST, create the user unless the username or
    email is already taken; always fall through to the registration page.
    """
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        username = request.form['first_name'] + request.form['last_name']
        existing = User.query.filter(
            or_(User.username == username, User.email == email)).first()
        if existing is not None:
            flash('Error! The username or email has already been registered.', 'danger')
        else:
            User.insert_user(username=username, password=password, email=email)
            flash('Register successfully!', 'success')
            return redirect(url_for('login'))
    return render_template('register.html', title='register')
def get(self, *, room, user):
    """List logs by room and user"""
    # Visible entries: broadcast (no receiver), sent by the user, or
    # addressed to the user — returned oldest first.
    return (
        current_app.session.query(Log)
        .filter_by(room_id=room.id)
        .filter(
            or_(
                Log.receiver_id == None,  # NOQA
                Log.user_id == user.id,
                Log.receiver_id == user.id,
            )
        )
        .order_by(Log.date_created.asc())
        .all()
    )
def condition(self, claus):
    """Translate one parsed clause into a SQLAlchemy expression.

    :param claus: object with .op (operator token) and .object/.other
        operands, each resolved through self.bind().
    :raises KeyError: for an unsupported operator token.
    """
    op = claus.op.strip().lower()
    return {
        'and': lambda a, b: and_(a, b),
        'or': lambda a, b: or_(a, b),
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '>=': lambda a, b: a >= b,
        '<=': lambda a, b: a <= b,
        '>': lambda a, b: a > b,
        '<': lambda a, b: a < b,
        'like': lambda a, b: a.like(b),
        # == None / != None compile to IS NULL / IS NOT NULL; b is ignored.
        'null': lambda a, b: a == None,
        'not null': lambda a, b: a != None
    }[op](self.bind(claus.object), self.bind(claus.other))
def retrieve_all_instances(tenant_id, schema_id, table_id, query_filters={}):
    """Fetch all Instance rows for a tenant/schema/table, optionally filtered.

    :param query_filters: mapping of field name -> raw value; values are
        coerced through the field's declared type. Indexed and non-indexed
        fields get different filter builders, OR-ed together.
        (The {} default is safe here: the parameter is rebound, never mutated.)
    :return: list of matching Instance objects.
    """
    criteria = [Instance.tenant_id == tenant_id,
                Instance.schema_id == schema_id,
                Instance.table_id == table_id]
    ext_criteria = []
    if query_filters:
        meta_fields = dict([(field.field_name, field)
                            for field in retrieve_schema_table_fields(tenant_id, schema_id, table_id)])
        # Fix: dict.has_key() was removed in Python 3; the `in` operator is
        # equivalent and also works on Python 2.
        index_fields = [field_name for field_name, field in meta_fields.items()
                        if field_name in query_filters and field.index_single]
        non_index_fields = set(query_filters.keys()) - set(index_fields)
        # Coerce every filter value through its field's declared type.
        query_filters = dict([(field_name, set_field_type(meta_fields[field_name]).type(value))
                              for field_name, value in query_filters.items()])
        ext_criteria = (
            [add_index_query_filter(index_field, query_filters[index_field]) for index_field in index_fields]
            + [add_query_filter(non_index, query_filters[non_index]) for non_index in non_index_fields]
        )
    criteria = and_(or_(*ext_criteria), *criteria)
    return Instance.query.filter(criteria).all()
def get(self, id):
    """Return hall info for a showing, with h_seate reduced to seats that
    are still purchasable.

    Seats are excluded when they belong to a paid/collected order, or to an
    unpaid order whose hold (o_time) has not yet expired.
    """
    hall_movie = HallMovie.query.get(id)
    print(id)
    hall = Hall.query.get(hall_movie.h_id)
    # Orders whose seats are definitively taken (paid, collected or not).
    movie_order_buyed = MovieOrder.query.filter(
        MovieOrder.o_hall_movie_id == id).filter(
        or_(MovieOrder.o_status == ORDER_STATUS_PAYED_NOT_GET,
            MovieOrder.o_status == ORDER_STATUS_GET)).all()
    # Unpaid orders whose reservation window is still open.
    movie_order_locked = MovieOrder.query.filter(
        MovieOrder.o_hall_movie_id == id).filter(
        and_(MovieOrder.o_status == ORDER_STATUS_NOT_PAY,
             MovieOrder.o_time > datetime.datetime.now())).all()
    print("movie_order_buyed", movie_order_buyed)
    print("movie_order_locked", movie_order_locked)
    seats_lock = []
    for movie_order in movie_order_buyed:
        seats = movie_order.o_seats.split("#")
        seats_lock += seats
    for movie_order in movie_order_locked:
        seats = movie_order.o_seats.split("#")
        seats_lock += seats
    print("seats_lock", seats_lock)
    # Symmetric difference: assumes locked seats are a subset of the hall
    # layout, so this leaves exactly the unlocked seats — TODO confirm.
    seats_can_buy = list(set(hall.h_seate.split("#")) ^ set(seats_lock))
    print("all", hall.h_seate.split("#"))
    print("seats_can_buy", seats_can_buy)
    hall.h_seate = "#".join(seats_can_buy)
    data = {
        "msg": "hall info ok",
        "status": HTTP_OK,
        "data": marshal(hall, hall_fields)
    }
    return data
def get(self, *, room, user, authenticated):
    """List logs by room and user"""
    # Only the user themself (or an authenticated caller) may read.
    if not authenticated and current_user != user:
        abort(HTTPStatus.UNAUTHORIZED)
    # Visible entries: broadcast (no receiver), sent by the user, or
    # addressed to the user — returned oldest first.
    return (
        current_app.session.query(Log)
        .filter_by(room_id=room.id)
        .filter(
            or_(
                Log.receiver_id == None,  # NOQA
                Log.user_id == user.id,
                Log.receiver_id == user.id,
            )
        )
        .order_by(Log.date_created.asc())
        .all()
    )
def insert_case_with_tagnames(self, name, content, tag_names_list, add_tag_names_list=None):
    """Insert a case, resolving existing tags by name and creating new ones.

    NOTE(review): this file contains another identically-named definition of
    insert_case_with_tagnames with the same logic; the later one wins.

    :param tag_names_list: names of tags expected to already exist.
    :param add_tag_names_list: names of brand-new tags to create inline.
    """
    case = Case()  # NOTE(review): this instance appears unused — confirm it is needed
    case.name = name
    case.content = content
    tags = []
    if tag_names_list:
        select = or_(
            *[Tag.name == tag_name for tag_name in tag_names_list])
        tags = DB.session.query(Tag).filter(select).all()
    if add_tag_names_list:
        for tag_name in add_tag_names_list:
            tag = Tag(name=tag_name, description='')
            tags.append(tag)
    return self.insert_case_with_tags(name, content, tags)
def get_schedules_to_execute(cls):
    """Select schedules whose groups are due for execution.

    A schedule is selected when either its group's last execution is stale
    (per the DELTA/TIME rules below) or the group has never executed at all.
    """
    cur_datetime = datetime.today()
    cur_date = cur_datetime.date()
    cur_time = cur_datetime.time()
    # Latest execution start per schedule group (correlated scalar subquery).
    last_execs_sq = db.session.query(SchGrReqExecute.begin_datetime).join(
        ScheduleGroupRequest,
        ScheduleGroupRequest.id == SchGrReqExecute.sch_group_request_id
    ).filter(
        ScheduleGroupRequest.schedule_group_id == ScheduleGroup.id,
    ).correlate(ScheduleGroup).order_by(
        SchGrReqExecute.id.desc()
    ).limit(1).subquery('sq')
    # "Stale" test by schedule-time type:
    #  - DELTA: more than `time` has elapsed since the last run;
    #  - TIME: wall clock passed `time` and the last run was on an earlier day.
    old_execs_q = db.session.query('1').filter(
        case([
            (
                ScheduleTime.type == ScheduleTimeType.DELTA,
                cur_datetime - last_execs_sq.c.begin_datetime > ScheduleTime.time
            ),
            (
                ScheduleTime.type == ScheduleTimeType.TIME,
                and_(
                    cur_time.isoformat() > ScheduleTime.time,
                    cur_date > cast(last_execs_sq.c.begin_datetime, db.Date)
                )
            ),
        ]),
    ).correlate(ScheduleTime)
    # Any execution at all for the group (catches never-executed groups).
    one_exec_q = SchGrReqExecute.query.join(
        ScheduleGroupRequest,
        ScheduleGroupRequest.id == SchGrReqExecute.sch_group_request_id
    ).filter(
        ScheduleGroupRequest.schedule_group_id == ScheduleGroup.id
    ).correlate(ScheduleGroup).limit(1)
    res = cls.query.join(
        ScheduleTime, ScheduleTime.id == cls.schedule_time_id
    ).join(
        ScheduleGroup, ScheduleGroup.id == cls.schedule_group_id
    ).filter(
        or_(
            old_execs_q.exists() == True,
            one_exec_q.exists() == False,
        )
    ).all()
    return res
def apply_all_mappings():
    """Run every mapping over unmapped Amazon items, creating journal entries.

    NOTE(review): this file contains another identically-named definition of
    apply_all_mappings with the same logic; the later one wins at import time.

    :raises Exception: when a matched item has a non-positive amount.
    """
    for mapping in db.session.query(Mappings).all():
        # NOTE(review): str(AmazonItems.id) stringifies the *column
        # expression*, not a row value — confirm the join is intended.
        matches = (db.session.query(AmazonItems).outerjoin(
            JournalEntries,
            JournalEntries.transaction_id == str(AmazonItems.id)).filter(
            JournalEntries.transaction_id.is_(None)).filter(
            or_(
                func.lower(
                    AmazonItems.title).like('%' + mapping.keyword.lower() + '%'),
                func.lower(AmazonItems.category_id).like(
                    '%' + mapping.keyword.lower() + '%'))).order_by(
            AmazonItems.shipment_date.desc()).all())
        for match in matches:
            new_journal_entry = JournalEntries()
            new_journal_entry.transaction_id = match.id
            new_journal_entry.transaction_source = 'amazon'
            new_journal_entry.timestamp = match.shipment_date
            # NOTE(review): sign check uses match.amount but the booked
            # amounts use match.item_total — confirm which field is right.
            if match.amount > 0:
                try:
                    db.session.query(Subaccounts).filter(
                        Subaccounts.name == mapping.positive_debit_subaccount_id).one()
                except NoResultFound:
                    # Auto-create the debit subaccount on first use.
                    new_subaccount = Subaccounts()
                    new_subaccount.name = mapping.positive_debit_subaccount_id
                    new_subaccount.parent = 'Discretionary Costs'
                    db.session.add(new_subaccount)
                    db.session.commit()
                new_journal_entry.debit_subaccount = mapping.positive_debit_subaccount_id
                new_journal_entry.credit_subaccount = mapping.positive_credit_subaccount_id
            else:
                # Non-positive amounts are unsupported; fail loudly.
                raise Exception()
            new_journal_entry.functional_amount = match.item_total
            new_journal_entry.functional_currency = 'USD'
            new_journal_entry.source_amount = match.item_total
            new_journal_entry.source_currency = 'USD'
            db.session.add(new_journal_entry)
            db.session.commit()
def search_customer(search):
    """Search customers by id first, then by name/email substring.

    :param search: an id (exact match, with or without orders) or a
        substring matched against first name, last name and email.
    :return: one serialized customer for an id hit, otherwise a JSON list
        of all substring matches.
    """
    customer = db.session.query(Customer).join(Order).filter(
        Customer.id == search).first()
    # Hacky code to check if the records that match the customer Id exist without orders
    if customer is None:  # fix: identity comparison instead of == None
        try:
            customer = Customer.query.get_or_404(search)
        except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
            print("No ID match")
    # if an ID matches the search return that customer
    if customer:
        return customer_schema.dump(customer)
    # else search rest of fields and return all customers containing subquery
    look_for = '%{}%'.format(search)
    all_customers = Customer.query.filter(
        or_(Customer.firstName.ilike(look_for),
            Customer.lastName.ilike(look_for),
            Customer.email.ilike(look_for))).all()
    return jsonify(filtered_customers_schema.dump(all_customers))
def operate(cls):
    """Return the operator-token -> callable dispatch table.

    Callables build SQLAlchemy expressions (or plain Python values) from
    their bound operands; 'between' is ternary, the null tests unary.
    """
    from sqlalchemy.sql.elements import and_, or_
    return {
        'and': lambda a, b: and_(a, b),
        'or': lambda a, b: or_(a, b),
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '<>': lambda a, b: a != b,
        '>=': lambda a, b: a >= b,
        '<=': lambda a, b: a <= b,
        '>': lambda a, b: a > b,
        '<': lambda a, b: a < b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,  # fix: was `a - b` (copy-paste error)
        '/': lambda a, b: a / b,
        'like': lambda a, b: a.like(b),
        'not like': lambda a, b: a.notlike(b),
        # == None / != None compile to IS NULL / IS NOT NULL in SQLAlchemy.
        'is null': lambda a: a == None,
        'is not null': lambda a: a != None,
        'between': lambda a, b, c: a.between(b, c)
    }
def ListDiscussions(self, request, context):
    """gRPC: page through a community's official-cluster discussions,
    newest (highest id) first, using the discussion id as the page token.
    """
    with session_scope() as session:
        page_size = min(MAX_PAGINATION_LENGTH, request.page_size or MAX_PAGINATION_LENGTH)
        next_page_id = int(request.page_token) if request.page_token else 0
        node = session.query(Node).filter(
            Node.id == request.community_id).one_or_none()
        if not node:
            context.abort(grpc.StatusCode.NOT_FOUND, errors.COMMUNITY_NOT_FOUND)
        # Fetch one extra row to detect whether another page exists;
        # a zero token means "start from the newest".
        discussions = (node.official_cluster.owned_discussions.filter(
            or_(Discussion.id <= next_page_id, next_page_id == 0)).order_by(
            Discussion.id.desc()).limit(page_size + 1).all())
        return communities_pb2.ListDiscussionsRes(
            discussions=[
                discussion_to_pb(discussion, context.user_id)
                for discussion in discussions[:page_size]
            ],
            next_page_token=str(discussions[-1].id)
            if len(discussions) > page_size else None,
        )
def potentially_limit_query_to_account_assets(
        query: Query, account_id: Optional[int]) -> Query:
    """Restrict an asset query to the current user's account.

    Admins and CLI users see everything unless an explicit account_id is
    given; non-admins may only pass their own account_id. Account-less
    (public) assets always remain visible. For querying public assets in
    particular, don't use this function.

    :param account_id: if set, only assets of this account (plus public
        ones) survive the filter.
    :raises Forbidden: for unauthenticated users, or a non-admin asking
        for another account.
    """
    authenticated = running_as_cli() or current_user.is_authenticated
    if not authenticated:
        raise Forbidden("Unauthenticated user cannot list assets.")
    is_admin = (
        running_as_cli()
        or current_user.has_role(ADMIN_ROLE)
        or current_user.has_role(ADMIN_READER_ROLE)
    )
    if account_id is None:
        if is_admin:
            # Admins with no explicit account query across all accounts.
            return query
        target_account = current_user.account_id
    else:
        if account_id != current_user.account_id and not is_admin:
            raise Forbidden("Non-admin cannot access assets from other accounts.")
        target_account = account_id
    return query.filter(
        or_(
            GenericAsset.account_id == target_account,
            GenericAsset.account_id == null(),
        ))
def search():
    """Search books by ISBN, author or title (substring match), ordered by title.

    Fix: request.args.get("book") returns None when the parameter is
    missing, which previously crashed on string concatenation; default to
    the empty string (which matches everything) instead.
    """
    q = "%" + request.args.get("book", "") + "%"
    books = Books.query.filter(
        or_(Books.isbn.like(q), Books.author.like(q),
            Books.title.like(q))).order_by(Books.title).all()
    return render_template("results.html", books=books)
def tokens_search_through_fields(corpus_id):
    """ Page to search tokens through fields (Form, POS, Lemma, Morph) within a corpus
    :param corpus_id: Id of the corpus
    """
    corpus = Corpus.query.get_or_404(corpus_id)
    if not corpus.has_access(current_user):
        abort(403)
    kargs = {}
    # make a dict with values splitted for each OR operator
    fields = {}
    for name in ("lemma", "form", "POS", "morph"):
        if request.method == "POST":
            value = strip_or_none(request.form.get(name))
        else:
            value = strip_or_none(request.args.get(name))
        # split values with the '|' OR operator but keep escaped '\|' ones
        if value is None:
            fields[name] = ""
        else:
            fields[name] = prepare_search_string(value)
        kargs[name] = value
    # all search combinations (cartesian product across the four fields).
    # NOTE(review): a field left empty contributes "" — iterating it yields
    # nothing, so any empty field empties search_branches and the unfiltered
    # else-branch below runs.
    search_branches = [{
        "lemma": lemma,
        "form": form,
        "POS": pos,
        "morph": morph
    } for lemma in fields["lemma"] for form in fields["form"] for pos in fields["POS"] for morph in fields["morph"]]
    value_filters = []
    # for each branch filter (= OR clauses if any)
    for search_branch in search_branches:
        branch_filters = [WordToken.corpus == corpus_id]
        # for each field (lemma, pos, form, morph)
        for name, value in search_branch.items():
            branch_filters.extend(
                column_search_filter(getattr(WordToken, name), value))
        value_filters.append(branch_filters)
    # there is at least one OR clause
    if len(value_filters) > 1:
        and_filters = [
            and_(*branch_filters) for branch_filters in value_filters
        ]
        flattened_filters = or_(*and_filters)
        tokens = WordToken.query.filter(flattened_filters).order_by(
            WordToken.order_id)
    else:
        if len(value_filters) == 1:
            value_filters = value_filters[0]
        tokens = WordToken.query.filter(*value_filters).order_by(
            WordToken.order_id)
    page = int_or(request.args.get("page"), 1)
    per_page = int_or(request.args.get("limit"), 100)
    tokens = tokens.paginate(page=page, per_page=per_page)
    return render_template_with_nav_info(
        'main/tokens_search_through_fields.html',
        corpus=corpus,
        tokens=tokens,
        **kargs)
def subjects_search_list(request, *args, **kw):
    """Search subjects, optionally constrained to a parent subject type or a
    parent subject (recursively via a CTE), with name substring matching and
    limit/offset paging.

    :raises APIError: 403 when the caller lacks the global search permission.
    :return: r_subjectlist payload with one entry per distinct subject id.
    """
    context = request.context
    if not request.has_perm(perm_global_search_subjects):
        raise APIError(403, "forbidden")
    exclude_leaves = request.validated_params.body.get("exclude_leaves", None)
    parent_subjecttype_id = request.validated_params.body.get("parent_subjecttype_id", None)
    parent_subject_id = request.validated_params.body.get("parent_subject_id", None)
    # Candidate subject types: optionally children of parent_subjecttype_id,
    # optionally restricted to non-leaf types.
    stj = t_subjecttypes.outerjoin(t_subjecttypes_subjecttypes, t_subjecttypes_subjecttypes.c.subjecttype_id == t_subjecttypes.c.id)
    q = select([t_subjecttypes.c.id], from_obj=stj)
    if parent_subjecttype_id is not None:
        q = q.where(
            t_subjecttypes_subjecttypes.c.part_of_id == parent_subjecttype_id
        )
    if exclude_leaves is not None:
        # Keep only types that have at least one child type.
        et = t_subjecttypes_subjecttypes.alias()
        eq = select([et.c.subjecttype_id]).where(et.c.part_of_id == t_subjecttypes.c.id)
        q = q.where(exists(eq))
    subjecttype_ids = [x["id"] for x in DBSession.execute(q).fetchall()]
    cols = [
        t_subjects.c.id,
        t_subjects.c.subjecttype_id,
        t_subjects.c.name,
        t_subjects.c.lat,
        t_subjects.c.lng,
        t_subjects.c.language_id,
        t_subjects.c.timezone,
        t_subjects.c.created_at,
    ]
    j = t_subjects
    if parent_subject_id is not None:
        # Recursive CTE: every subject reachable below parent_subject_id,
        # carrying the '->'-joined path for "inherited" bookkeeping below.
        sq = text("""
            WITH RECURSIVE nodes_cte(subject_id, subjecttype_id, name, part_of_id, depth, path) AS (
                SELECT g1.id, g1.subjecttype_id, g1.name, NULL::bigint as part_of_id, 1::INT as depth, g1.id::TEXT as path
                FROM subjects as g1
                LEFT JOIN subjects_subjects ss ON ss.subject_id=g1.id
                WHERE ss.part_of_id = :subject_id
                UNION ALL
                SELECT c.subject_id, p.subjecttype_id, p.name, c.part_of_id, p.depth + 1 AS depth, (p.path || '->' || g2.id ::TEXT)
                FROM nodes_cte AS p, subjects_subjects AS c
                JOIN subjects AS g2 ON g2.id=c.subject_id
                WHERE c.part_of_id = p.subject_id
            ) SELECT * FROM nodes_cte
        """).bindparams(subject_id=parent_subject_id)\
            .columns(subject_id=Integer, subjecttype_id=Integer, name=String, part_of_id=Integer, depth=Integer, path=String)\
            .alias()
        j = j.outerjoin(sq, sq.c.subject_id == t_subjects.c.id)
        cols += [
            sq.c.path,
            sq.c.name.label("inherited_by_name"),
            sq.c.subjecttype_id.label("inherited_by_subjecttype_id")
        ]
    subjects_query = select(cols, from_obj=j).where(t_subjects.c.subjecttype_id.in_(subjecttype_ids))
    include_search = request.validated_params.body.get("include_search", None)
    if include_search:
        subjects_query = subjects_query.where(or_(
            t_subjects.c.name.ilike("%" + include_search + "%"),
        ))
    limit = request.validated_params.body.get("limit", None)
    if limit:
        subjects_query = subjects_query.limit(limit)
    offset = request.validated_params.body.get("offset", None)
    if offset:
        subjects_query = subjects_query.offset(offset)
    result = DBSession.execute(subjects_query).fetchall()
    subjects = {}
    for r in result:
        # Deduplicate by subject id; the CTE columns are absent when no
        # parent_subject_id was given, hence the "key in row" guards.
        if not r["id"] in subjects:
            path = r["path"] if "path" in r and r["path"] is not None else ""
            inherited_by_name = r["inherited_by_name"] if "inherited_by_name" in r and r["inherited_by_name"] is not None else ""
            inherited_by_subjecttype_id = r["inherited_by_subjecttype_id"] if "inherited_by_subjecttype_id" in r and r["inherited_by_subjecttype_id"] is not None else ""
            subjects[r["id"]] = {
                'id': r["id"],
                'subjecttype_id': r["subjecttype_id"],
                'name': r["name"],
                'created_at': r["created_at"],
                'path': path,
                'inherited_by_subjecttype_id': inherited_by_subjecttype_id,
                'inherited_by_name': inherited_by_name,
                'in_parent': True if path else False,
                'directly_in_parent': len(path) > 0 and not "->" in path,
                'inherited_by': path.split("->")[0] if len(path) > 0 and "->" in path else None
            }
    return r_subjectlist.output({
        "subjects": list(subjects.values())
    })
def __init__(self, filename, replace):
    """Set up the copy-phase importer and preload copy-strategy concepts.

    :param filename: file to import, forwarded to ``AbstractImporter``.
    :param replace: replace flag, forwarded to ``AbstractImporter``.
    """
    AbstractImporter.__init__(
        self,
        errorKey=Constant.ERROR_KEY_COPY,
        filename=filename,
        replace=replace,
        previousStatus=ConstantStatus.FACT_STATUS,
        actualStatus=ConstantStatus.COPY_STATUS,
    )
    # Preload every custom concept whose fill strategy is copy-based.
    copy_strategy_filter = or_(
        CustomConcept.fillStrategy == "COPY_CALCULATE",
        CustomConcept.fillStrategy == "COPY",
    )
    self.customConceptList = GenericDao().getAllResult(
        objectClazz=CustomConcept,
        condition=copy_strategy_filter,
        session=self.session,
    )
def tokens_search_through_fields(corpus_id):
    """ Page to search tokens through fields (Form, POS, Lemma, Morph) within a corpus

    Accepts the field values either as POST form data or as GET query args.
    Each field value may contain '|' to OR several alternatives (escaped
    '\\|' is kept literal); the cartesian product of all alternatives forms
    the OR branches of the final query.  Extra query args: ``orderBy``
    (order_id/lemma/pos/form/morph), ``desc`` ("0"/"1"), ``page``, ``limit``.

    :param corpus_id: Id of the corpus
    """
    corpus = Corpus.query.get_or_404(corpus_id)
    if not corpus.has_access(current_user):
        abort(403)

    # "form" is always searchable; other columns come from the corpus config
    # ("POS" keeps its casing, everything else is lowercased).
    columns = tuple(["form"] + [
        col if col == "POS" else col.lower()
        for col in corpus.get_columns_headings()
    ])

    input_values = {}
    # make a dict with values splitted for each OR operator
    fields = {}
    source_dict = request.form if request.method == "POST" else request.args
    for name in columns:
        value = strip_or_none(source_dict.get(name))
        input_values[name] = value
        # split values with the '|' OR operator but keep escaped '\|' ones
        fields[name] = prepare_search_string(
            value) if value is not None else ""

    # all search combinations (cartesian product of the per-field alternatives)
    search_branches = [
        dict(prod)
        for prod in product(*[[(field, value) for value in fields[field]]
                              for field in fields])
    ]

    value_filters = []
    # for each branch filter (= OR clauses if any)
    for search_branch in search_branches:
        branch_filters = [WordToken.corpus == corpus_id]
        # for each field (lemma, pos, form, morph)
        for name, value in search_branch.items():
            branch_filters.extend(
                column_search_filter(getattr(WordToken, name), value))
        value_filters.append(branch_filters)

    if not value_filters:
        # If the search is empty, we only search for the corpus_id
        value_filters.append([WordToken.corpus == corpus_id])
    # there is at least one OR clause

    # get sort arguments (sort per default by WordToken.order_id)
    order_by = {
        "order_id": WordToken.order_id,
        "lemma": func.lower(WordToken.lemma),
        "pos": func.lower(WordToken.POS),
        "form": func.lower(WordToken.form),
        "morph": func.lower(WordToken.morph),
    }.get(request.args.get("orderBy"), WordToken.order_id)

    # Single branch -> plain AND of its filters; several branches -> OR of
    # the AND-ed branches.
    args = []
    if len(value_filters) > 1:
        and_filters = [
            and_(*branch_filters) for branch_filters in value_filters
        ]
        args = [or_(*and_filters)]
    elif len(value_filters) == 1:
        args = value_filters[0]

    tokens = WordToken.query.filter(*args).order_by(
        order_by.desc()
        if bool(int(request.args.get("desc", "0")))  # default sort order is ascending
        else order_by)

    # Pagination: defaults are page 1, 100 tokens per page.
    page = int_or(request.args.get("page"), 1)
    per_page = int_or(request.args.get("limit"), 100)
    tokens = tokens.paginate(page=page, per_page=per_page)
    return render_template_with_nav_info(
        'main/tokens_search_through_fields.html',
        search_kwargs={
            "corpus_id": corpus.id,
            **input_values
        },
        changed=corpus.changed(tokens.items),
        corpus=corpus,
        tokens=tokens,
        **input_values)
def post(self):
    """Create a movie-ticket order for the current user.

    Request args (via ``parse``):
        hall_movie_id: id of the screening (``HallMovie``) to book.
        o_seats: '#'-separated seat codes the user wants to buy.

    A seat is unavailable when it belongs to a paid/collected order, or to
    an unpaid order whose 15-minute lock has not yet expired.

    :return: success payload with the marshalled order, or aborts with 400.
    """
    user = g.user
    args = parse.parse_args()
    hall_movie_id = args.get("hall_movie_id")
    o_seats = args.get("o_seats")

    # Hall layout of the screening; h_seate holds '#'-separated seat codes.
    # NOTE(review): assumes hall_movie_id refers to an existing HallMovie --
    # a missing row would raise AttributeError here.
    hall = Hall.query.get(HallMovie.query.get(hall_movie_id).h_id)

    # Orders that definitively own their seats (paid, or ticket collected).
    movie_order_buyed = MovieOrder.query.filter(
        MovieOrder.o_hall_movie_id == hall_movie_id).filter(
            or_(MovieOrder.o_status == ORDER_STATUS_PAYED_NOT_GET,
                MovieOrder.o_status == ORDER_STATUS_GET)).all()
    # Unpaid orders whose seat lock is still active (o_time in the future).
    movie_order_locked = MovieOrder.query.filter(
        MovieOrder.o_hall_movie_id == hall_movie_id).filter(
            and_(MovieOrder.o_status == ORDER_STATUS_NOT_PAY,
                 MovieOrder.o_time > datetime.datetime.now())).all()

    # Collect every seat that is already taken or temporarily locked.
    seats_lock = []
    for movie_order in movie_order_buyed + movie_order_locked:
        seats_lock += movie_order.o_seats.split("#")

    # BUGFIX: use set difference instead of symmetric difference (^).
    # With ^, a locked seat that is not part of the hall layout would
    # wrongly show up as buyable.
    seats_can_buy = list(set(hall.h_seate.split("#")) - set(seats_lock))

    o_seats_list = o_seats.split("#")
    # Every requested seat must be available (subset check).
    if not set(o_seats_list).issubset(set(seats_can_buy)):
        # BUGFIX: abort() takes the HTTP status code first; a bare message
        # string is not a valid status (matches the abort(400, ...) below).
        abort(400, msg="座位选择错误,请重新选择")

    mover_order = MovieOrder()
    mover_order.o_hall_movie_id = hall_movie_id
    mover_order.o_seats = o_seats
    mover_order.o_user_id = user.id
    # Lock the seats for 15 minutes pending payment.
    mover_order.o_time = datetime.datetime.now() + datetime.timedelta(
        minutes=15)

    if not mover_order.save():
        abort(400, msg="下单失败")

    data = {
        "msg": "下单成功",
        "status": HTTP_OK,
        "data": marshal(mover_order, movie_order_fields)
    }
    return data