def get_concerned_users(self, all_members=True):
    owners = DB.query(model.User).join(model.PackageRole)
    owners = owners.filter(model.PackageRole.package_id == self.dataset.id)
    owners = owners.filter(model.PackageRole.role == model.Role.ADMIN)
    admins = DB.query(model.User).filter(model.User.sysadmin == True)
    queries = [owners, admins]
    organization = model.Group.get(self.dataset.owner_org) if self.dataset.owner_org else None
    if organization:
        org_members = DB.query(model.User)
        org_members = org_members.join(model.Member, model.Member.table_id == model.User.id)
        org_members = org_members.filter(
            model.Member.group == organization,
            model.Member.state == 'active',
            model.Member.table_name == 'user',
        )
        if not all_members:
            org_members = org_members.filter(sql.or_(
                model.Member.capacity == model.Role.ADMIN,
                model.Member.capacity == model.Role.EDITOR,
            ))
        queries.append(org_members)
    queries = (q.subquery().select() for q in queries)
    return DB.query(model.User).select_from(sql.union(*queries))
def sqlalchemy_view_declaration(cls):
    T1 = cls.registry.T1
    TP = cls.registry.T1.aliased()
    T2 = cls.registry.T2
    subquery = union(
        select([
            T1.code.label('code'),
            TP.id.label('parent_val1'),
            TP.code.label('parent_code')
        ]).where(T1.parent_id == TP.id),
        select([
            T1.code.label('code'),
            expression.literal_column("null as parent_val1"),
            expression.literal_column("null as parent_code")
        ]).where(T1.parent_id.is_(None))
    ).alias()
    query = select([
        T1.code.label('code'),
        T1.val.label('val1'),
        T2.val.label('val2'),
        subquery.c.parent_val1.label('parent_val1'),
        subquery.c.parent_code.label('parent_code')
    ])
    query = query.where(subquery.c.code == T1.code)
    query = query.where(subquery.c.code == T2.code)
    return query
def get_busiest_airports() -> Response:
    """Get the busiest airports."""
    limit = int(request.args.get("limit", 10))
    query = request.args.get("query")
    since = int(request.args.get("since", 0))
    if query:
        # Autocomplete: match the query against both origins and destinations.
        result = flights_engine.execute(
            union(
                select([flights.c.origin]).distinct().where(
                    flights.c.origin.like(f"%{query}%")),
                select([flights.c.destination]).distinct().where(
                    flights.c.destination.like(f"%{query}%")),
            ))
        if result is None:
            abort(404)
        return jsonify([row[0] for row in result])
    return jsonify([
        row.origin for row in flights_engine.execute(
            select([flights.c.origin]).where(
                func.coalesce(flights.c.actual_off, flights.c.actual_out) > (
                    select([
                        func.datetime(func.max(flights.c.actual_off),
                                      text(f"'-{since} hours'"))
                    ]))).group_by(flights.c.origin).order_by(
                        func.count().desc(), flights.c.origin).limit(limit))
    ])
def permissions(request, kiosk_id):
    from Model import User
    from sqlalchemy.sql import union, select
    kiosk = request.db_session.query(Kiosk).filter_by(id=kiosk_id).first()
    ckmt = CompanyKioskManager.__table__
    kkmt = KioskKioskManager.__table__
    company_granted = select(
        [ckmt.c.company_id.label('company'),
         ckmt.c.user_id.label('user_id'),
         kkmt.c.kiosk_id.label('kiosk')],
        from_obj=ckmt.outerjoin(kkmt, ckmt.c.user_id == kkmt.c.user_id)).\
        where(or_(kkmt.c.kiosk_id == kiosk_id, None == kkmt.c.kiosk_id))
    kiosk_granted = select(
        [ckmt.c.company_id.label('company'),
         kkmt.c.user_id.label('user_id'),
         kkmt.c.kiosk_id.label('kiosk')],
        from_obj=kkmt.outerjoin(ckmt, kkmt.c.user_id == ckmt.c.user_id)).\
        where(or_(kkmt.c.kiosk_id == kiosk_id, None == kkmt.c.kiosk_id))
    granted = union(company_granted, kiosk_granted).alias('granted')
    kiosk_granted = request.db_session.query(User)\
        .join(kiosk_granted.alias('kg'), User.id == kiosk_granted.c.user_id).all()
    granted_users = request.db_session.query(granted, User).join(
        User, granted.c.user_id == User.id).all()
    return render(request, 'kiosk_permissions.html',
                  {'users': [u for u in kiosk.company.users if u not in kiosk_granted],
                   'granted': granted_users,
                   'kioskId': kiosk_id})
def last_modified(*av):
    """
    Return the most recent timestamp for a package revision, with optional
    extra where clauses.
    """
    from ckan import model
    where = []
    for arg in av:
        if isinstance(arg, expression.ClauseElement) or isinstance(arg, basestring):
            where.append(arg)
    where_clauses = [
        and_(model.package_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_extra_table.c.package_id == model.package_table.c.id,
             model.package_extra_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.subject_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.object_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_group_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_table.c.resource_group_id == model.resource_group_table.c.id,
             model.resource_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_tag_table.c.package_id == model.package_table.c.id,
             model.package_tag_table.c.revision_id == model.revision_table.c.id,
             *where)
    ]
    query = union(*[
        select([model.revision_table.c.timestamp], x) for x in where_clauses
    ]).order_by("timestamp DESC").limit(1)
    conn = model.meta.engine.connect()
    result = conn.execute(query).fetchone()
    if result:
        timestamp = result[0].utctimetuple()
        usecs = float(result[0].microsecond) / 1e6
    else:
        timestamp, usecs = gmtime(), 0
    # use timegm instead of mktime, because we don't want it localised
    return timegm(timestamp) + usecs
def last_modified(*av):
    """
    Return the most recent timestamp for a package revision, with optional
    extra where clauses.
    """
    from ckan import model
    where = []
    for arg in av:
        if isinstance(arg, expression.ClauseElement) or isinstance(arg, basestring):
            where.append(arg)
    where_clauses = [
        and_(model.package_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_extra_table.c.package_id == model.package_table.c.id,
             model.package_extra_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.subject_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.object_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_resource_table.c.package_id == model.package_table.c.id,
             model.package_resource_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_tag_table.c.package_id == model.package_table.c.id,
             model.package_tag_table.c.revision_id == model.revision_table.c.id,
             *where)
    ]
    query = union(*[
        select([model.revision_table.c.timestamp], x) for x in where_clauses
    ]).order_by("timestamp DESC").limit(1)
    conn = model.meta.engine.connect()
    result = conn.execute(query).fetchone()
    if result:
        timestamp = result[0].utctimetuple()
        usecs = float(result[0].microsecond) / 1e6
    else:
        timestamp, usecs = gmtime(), 0
    return mktime(timestamp) + usecs
def ListMutualFriends(self, request, context):
    if context.user_id == request.user_id:
        return api_pb2.ListMutualFriendsRes(mutual_friends=[])
    with session_scope() as session:
        user = session.execute(
            select(User).where_users_visible(context).where(
                User.id == request.user_id)).scalar_one_or_none()
        if not user:
            context.abort(grpc.StatusCode.NOT_FOUND, errors.USER_NOT_FOUND)
        q1 = (select(FriendRelationship.from_user_id.label("user_id"))
              .where(FriendRelationship.to_user_id == context.user_id)
              .where(FriendRelationship.from_user_id != request.user_id)
              .where(FriendRelationship.status == FriendStatus.accepted))
        q2 = (select(FriendRelationship.to_user_id.label("user_id"))
              .where(FriendRelationship.from_user_id == context.user_id)
              .where(FriendRelationship.to_user_id != request.user_id)
              .where(FriendRelationship.status == FriendStatus.accepted))
        q3 = (select(FriendRelationship.from_user_id.label("user_id"))
              .where(FriendRelationship.to_user_id == request.user_id)
              .where(FriendRelationship.from_user_id != context.user_id)
              .where(FriendRelationship.status == FriendStatus.accepted))
        q4 = (select(FriendRelationship.to_user_id.label("user_id"))
              .where(FriendRelationship.from_user_id == request.user_id)
              .where(FriendRelationship.to_user_id != context.user_id)
              .where(FriendRelationship.status == FriendStatus.accepted))
        mutual = select(intersect(union(q1, q2), union(q3, q4)).subquery())
        mutual_friends = (session.execute(
            select(User).where_users_visible(context).where(
                User.id.in_(mutual))).scalars().all())
        return api_pb2.ListMutualFriendsRes(mutual_friends=[
            api_pb2.MutualFriend(user_id=mutual_friend.id,
                                 username=mutual_friend.username,
                                 name=mutual_friend.name)
            for mutual_friend in mutual_friends
        ])
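# A minimal, self-contained sketch (SQLAlchemy 1.4+ Core, in-memory SQLite, toy
# schema invented for illustration) of the pattern used above: each user's friend
# set is the union() of both directions of the relationship, and the mutual
# friends are the intersect() of the two friend sets.
from sqlalchemy import (Column, Integer, MetaData, Table, create_engine, insert,
                        intersect, select, union)

engine = create_engine("sqlite://")
metadata = MetaData()
friendships = Table("friendships", metadata,
                    Column("from_id", Integer), Column("to_id", Integer))
metadata.create_all(engine)

def friend_ids(user_id):
    # Friendships are stored once per pair, so look in both directions.
    both_directions = union(
        select(friendships.c.to_id.label("user_id")).where(friendships.c.from_id == user_id),
        select(friendships.c.from_id.label("user_id")).where(friendships.c.to_id == user_id),
    ).subquery()
    return select(both_directions.c.user_id)

with engine.begin() as conn:
    conn.execute(insert(friendships), [
        {"from_id": 1, "to_id": 2}, {"from_id": 1, "to_id": 3},
        {"from_id": 2, "to_id": 3}, {"from_id": 2, "to_id": 4},
    ])
    mutual = intersect(friend_ids(1), friend_ids(2)).subquery()
    print(conn.execute(select(mutual.c.user_id)).scalars().all())  # [3]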
def are_blocked(session, user1_id, user2_id):
    blocked_users = (select(UserBlock.blocked_user_id)
                     .where(UserBlock.blocking_user_id == user1_id)
                     .where(UserBlock.blocked_user_id == user2_id))
    blocking_users = (select(UserBlock.blocking_user_id)
                      .where(UserBlock.blocking_user_id == user2_id)
                      .where(UserBlock.blocked_user_id == user1_id))
    return session.execute(
        select(union(blocked_users, blocking_users).subquery())).first() is not None
def _relevant_user_blocks(user_id):
    """
    Gets list of blocked user IDs or users that have blocked this user:
    those should be hidden
    """
    blocked_users = couchers_select(UserBlock.blocked_user_id).where(
        UserBlock.blocking_user_id == user_id)
    blocking_users = couchers_select(UserBlock.blocking_user_id).where(
        UserBlock.blocked_user_id == user_id)
    return couchers_select(union(blocked_users, blocking_users).subquery())
def removeDataset(self, ref):
    # Docstring inherited from Registry.removeDataset.
    if not ref.id:
        raise AmbiguousDatasetError(f"Cannot remove dataset {ref} without ID.")
    # Remove component datasets. We assume ``ref.components`` is already
    # correctly populated, and rely on ON DELETE CASCADE to remove entries
    # from DatasetComposition.
    for componentRef in ref.components.values():
        self.removeDataset(componentRef)
    datasetTable = self._schema.tables["dataset"]
    # Remove related quanta. We actually delete from Execution, because
    # Quantum's primary key (quantum_id) is also a foreign key to
    # Execution.execution_id. We then rely on ON DELETE CASCADE to remove
    # the Quantum record as well as any related records in
    # DatasetConsumers. Note that we permit a Quantum to be deleted
    # without removing the Datasets it refers to, but do not allow a
    # Dataset to be deleted without removing the Quanta that refer to
    # them. A Dataset is still quite usable without provenance, but
    # provenance is worthless if it's inaccurate.
    executionTable = self._schema.tables["execution"]
    datasetConsumersTable = self._schema.tables["dataset_consumers"]
    selectProducer = select(
        [datasetTable.c.quantum_id]
    ).where(
        datasetTable.c.dataset_id == ref.id
    )
    selectConsumers = select(
        [datasetConsumersTable.c.quantum_id]
    ).where(
        datasetConsumersTable.c.dataset_id == ref.id
    )
    self._connection.execute(
        executionTable.delete().where(
            executionTable.c.execution_id.in_(union(selectProducer, selectConsumers))
        )
    )
    # Remove the Dataset record itself. We rely on ON DELETE CASCADE to
    # remove from DatasetCollection, and assume foreign key violations
    # come from DatasetLocation (everything else should have an ON DELETE).
    try:
        self._connection.execute(
            datasetTable.delete().where(datasetTable.c.dataset_id == ref.id)
        )
    except IntegrityError as err:
        raise OrphanedRecordError(f"Dataset {ref} is still present in one or more Datastores.") from err
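# A hedged, self-contained sketch (toy tables on in-memory SQLite, not the Registry
# schema above) of the delete-via-union pattern: gather ids from two SELECTs with
# union() and hand the compound select to IN() inside a DELETE.
from sqlalchemy import (Column, Integer, MetaData, Table, create_engine, delete,
                        insert, select, union)

engine = create_engine("sqlite://")
metadata = MetaData()
executions = Table("executions", metadata, Column("execution_id", Integer, primary_key=True))
producers = Table("producers", metadata, Column("quantum_id", Integer), Column("dataset_id", Integer))
consumers = Table("consumers", metadata, Column("quantum_id", Integer), Column("dataset_id", Integer))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(executions), [{"execution_id": i} for i in (1, 2, 3)])
    conn.execute(insert(producers), [{"quantum_id": 1, "dataset_id": 42}])
    conn.execute(insert(consumers), [{"quantum_id": 2, "dataset_id": 42}])
    produced = select(producers.c.quantum_id).where(producers.c.dataset_id == 42)
    consumed = select(consumers.c.quantum_id).where(consumers.c.dataset_id == 42)
    # Delete every execution that either produced or consumed dataset 42.
    conn.execute(delete(executions).where(
        executions.c.execution_id.in_(union(produced, consumed))))
    print(conn.execute(select(executions.c.execution_id)).scalars().all())  # [3]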
def get_all_assessment_guids():
    '''
    Get all unique assessment guids from the `Constants.TSB_METADATA` and
    `Constants.TSB_ERROR` tables.

    When a TSB request is processed successfully, a metadata record is saved
    to `Constants.TSB_METADATA`; when a request fails, an error record is
    saved to `Constants.TSB_ERROR` instead. This is why this function needs
    to look at both tables.
    '''
    with TSBDBConnection() as conn:
        # query guids from metadata table
        tsb_metadata = conn.get_table(Constants.TSB_METADATA)
        query_metadata = Select([tsb_metadata.c.state_code, tsb_metadata.c.asmt_guid])
        # query guids from error message table
        tsb_error = conn.get_table(Constants.TSB_ERROR)
        query_error = Select([tsb_error.c.state_code, tsb_error.c.asmt_guid])
        return conn.execute(union(query_metadata, query_error)).fetchall()
def sqlalchemy_view_declaration(cls):
    T1 = cls.registry.T1
    TP = cls.registry.T1.aliased()
    T2 = cls.registry.T2
    subquery = union(
        select([
            T1.code.label('code'),
            TP.code.label('parent_code')
        ]).where(T1.parent_id == TP.id),
        select([
            T1.code.label('code'),
            expression.literal_column("null as parent_code")
        ]).where(T1.parent_id.is_(None))
    ).alias()
    query = select([T1.code.label('code'),
                    T1.val.label('val1'),
                    T2.val.label('val2'),
                    subquery.c.parent_code.label('parent_code')])
    query = query.where(subquery.c.code == T1.code)
    query = query.where(subquery.c.code == T2.code)
    return query
def base_props_selectable(self):
    invites = session.query(Invite.id.label("id"),
                            Invite.person.label("person"),
                            Invite.vote_result.label("vote_result"),
                            literal("invite").label("type"),
                            Invite.active.label("active"))
    kicks = session.query(Kick.id.label("id"),
                          User.name.label("person"),
                          Kick.vote_result.label("vote_result"),
                          literal("kick").label("type"),
                          Kick.active.label("active")).join(Kick.kicked)
    suggestion = session.query(Suggestion.id.label("id"),
                               User.name.label("person"),
                               Suggestion.vote_result.label("vote_result"),
                               literal("suggestion").label("type"),
                               Suggestion.active.label("active")).join(Suggestion.proposer)
    props = union(invites, kicks, suggestion).alias("prop")
    return props
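# A minimal sketch (toy tables on in-memory SQLite; names are made up, and Core
# select() stands in for the ORM session.query() used above) of the same pattern:
# tag each branch of a union() with a literal "type" column so heterogeneous rows
# can be listed and sorted together through one selectable.
from sqlalchemy import (Column, Integer, MetaData, String, Table, create_engine,
                        insert, literal, select, union)

engine = create_engine("sqlite://")
metadata = MetaData()
invites = Table("invites", metadata, Column("id", Integer), Column("person", String))
kicks = Table("kicks", metadata, Column("id", Integer), Column("person", String))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(invites), [{"id": 1, "person": "alice"}])
    conn.execute(insert(kicks), [{"id": 7, "person": "bob"}])
    props = union(
        select(invites.c.id, invites.c.person, literal("invite").label("type")),
        select(kicks.c.id, kicks.c.person, literal("kick").label("type")),
    ).subquery("prop")
    print(conn.execute(select(props).order_by(props.c.type)).all())
    # [(1, 'alice', 'invite'), (7, 'bob', 'kick')]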
def metadata_modified(self):
    """
    Return most recent timestamp for revisions related to this package.
    NB Excludes changes to the package's groups
    """
    import ckan.model as model
    where = [model.package_table.c.id == self.id]
    where_clauses = [
        and_(model.package_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_extra_table.c.package_id == model.package_table.c.id,
             model.package_extra_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.subject_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.object_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_group_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_table.c.resource_group_id == model.resource_group_table.c.id,
             model.resource_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_tag_table.c.package_id == model.package_table.c.id,
             model.package_tag_table.c.revision_id == model.revision_table.c.id,
             *where)
    ]
    query = union(*[
        select([model.revision_table.c.timestamp], x) for x in where_clauses
    ]).order_by('timestamp DESC').limit(1)
    # Use current connection because we might be in a 'before_commit' of
    # a SessionExtension - only by using the current connection can we get
    # at the newly created revision etc. objects.
    conn = model.Session.connection()
    result = conn.execute(query).fetchone()
    if result:
        result_datetime = iso_date_to_datetime_for_sqlite(result[0])
        timestamp_without_usecs = result_datetime.utctimetuple()
        usecs = float(result_datetime.microsecond) / 1e6
        # use timegm instead of mktime, because we don't want it localised
        timestamp_float = timegm(timestamp_without_usecs) + usecs
        return datetime.datetime.utcfromtimestamp(timestamp_float)
def metadata_modified(self):
    """
    Return most recent timestamp for revisions related to this package.
    NB Excludes changes to the package's groups
    """
    from ckan import model
    where = [model.package_table.c.id == self.id]
    where_clauses = [
        and_(model.package_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_extra_table.c.package_id == model.package_table.c.id,
             model.package_extra_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.subject_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_relationship_table.c.object_package_id == model.package_table.c.id,
             model.package_relationship_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_group_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.resource_group_table.c.package_id == model.package_table.c.id,
             model.resource_table.c.resource_group_id == model.resource_group_table.c.id,
             model.resource_table.c.revision_id == model.revision_table.c.id,
             *where),
        and_(model.package_tag_table.c.package_id == model.package_table.c.id,
             model.package_tag_table.c.revision_id == model.revision_table.c.id,
             *where)
    ]
    query = union(*[
        select([model.revision_table.c.timestamp], x) for x in where_clauses
    ]).order_by('timestamp DESC').limit(1)
    # Use current connection because we might be in a 'before_commit' of
    # a SessionExtension - only by using the current connection can we get
    # at the newly created revision etc. objects.
    conn = model.Session.connection()
    result = conn.execute(query).fetchone()
    if result:
        result_datetime = _types.iso_date_to_datetime_for_sqlite(result[0])
        timestamp_without_usecs = result_datetime.utctimetuple()
        usecs = float(result_datetime.microsecond) / 1e6
        # use timegm instead of mktime, because we don't want it localised
        timestamp_float = timegm(timestamp_without_usecs) + usecs
        return datetime.datetime.utcfromtimestamp(timestamp_float)
def normalize_query_by_date(query, date_label, report_label, start_date=None,
                            end_date=None, interval='1 month', granularity='1 day'):
    """
    Fills in missing date "buckets" for an aggregate query grouped by date.

    Aggregate queries grouped by date are often used for generating reports,
    like "How many widgets did I sell on each day of last month?" These
    queries often look similar to this::

        SELECT date_of_sale AS day, count(id) AS sales_count
        FROM sales
        WHERE date_of_sale > now() - interval '1 month'
        GROUP BY day
        ORDER BY day;

    This kind of query will ONLY return days that contain at least one sale.
    Days with zero sales will not be returned at all, leaving gaps in the
    report. To combat this, we can normalize the query by generating a UNION
    with a Postgres date series::

        SELECT date_of_sale AS day, coalesce(sales_count, 0) AS sales_count
        FROM (
            SELECT date_of_sale AS day, count(id) AS sales_count
            FROM sales
            WHERE date_of_sale > now() - interval '1 month'
            GROUP BY day
            UNION
            SELECT generate_series(now() - interval '1 month', now(), '1 day') AS day,
                   0 AS sales_count
        ) AS union_query
        ORDER BY union_query.day;

    Args:
        query (sqlalchemy.orm.query.Query): The original query grouped by
            date, in which the "group by date" column is labelled using
            column.label(date_label), and the "aggregate" column is labelled
            using column.label(report_label).
        date_label (str): The label used to label the "group by date" column.
        report_label (str): The label used to label the "aggregate" column.
        start_date (datetime): Start date of the query.
        end_date (datetime): End date of the query.
        interval (str): Alternately, the length of time from either
            `start_date` or `end_date` expressed as a Postgres interval.
            Defaults to '1 month'.
        granularity (str): The granularity of each date "bucket" expressed as
            a Postgres interval. Defaults to '1 day'.

    Returns:
        sqlalchemy.orm.query.Query: A normalized aggregate date query.
    """
    series = generate_date_series(start_date, end_date, interval, granularity)
    series_query = select([
        series.label(date_label),
        literal(0).label(report_label)
    ])
    query = union(query, series_query).alias()
    query = select([
        text(date_label),
        func.coalesce(text(report_label), literal(0)).label(report_label)
    ], from_obj=query).order_by(date_label)
    return query
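# A self-contained sketch of the same gap-filling idea on SQLite, which lacks
# Postgres' generate_series(); a handful of zero-count rows built in Python stand
# in for the date series, and the table and dates are invented for illustration.
import datetime
from sqlalchemy import (Column, Date, Integer, MetaData, Table, create_engine,
                        func, insert, literal, select, union)

engine = create_engine("sqlite://")
metadata = MetaData()
sales = Table("sales", metadata,
              Column("id", Integer, primary_key=True),
              Column("date_of_sale", Date))
metadata.create_all(engine)

start = datetime.date(2024, 1, 1)
days = [start + datetime.timedelta(days=i) for i in range(4)]

with engine.begin() as conn:
    conn.execute(insert(sales), [
        {"date_of_sale": days[0]}, {"date_of_sale": days[0]}, {"date_of_sale": days[2]},
    ])
    # The real aggregate, which only returns days that had at least one sale.
    aggregate = (select(sales.c.date_of_sale.label("day"),
                        func.count(sales.c.id).label("sales_count"))
                 .group_by(sales.c.date_of_sale))
    # UNION it with one zero-count row per day, then take max() per day so the
    # zero rows only survive for days that had no sales at all.
    union_query = union(
        aggregate,
        *[select(literal(d).label("day"), literal(0).label("sales_count")) for d in days]
    ).subquery()
    normalized = (select(union_query.c.day,
                         func.max(union_query.c.sales_count).label("sales_count"))
                  .group_by(union_query.c.day)
                  .order_by(union_query.c.day))
    for row in conn.execute(normalized):
        print(row.day, row.sales_count)  # counts 2, 0, 1, 0 for the four days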
# On the other hand, the count() function, which returns the number of rows
# selected from a table, is rendered by the following usage of func:
cn.execute(select([func.count(students.c.id)])).fetchall()

# The max() function is used the same way and returns the maximum value of the
# selected column:
cn.execute(select([func.max(students.c.id)])).fetchall()

# find min value
cn.execute(select([func.min(students.c.id)])).fetchall()

# find average
cn.execute(select([func.avg(students.c.id)])).fetchall()

# ========================= SQLAlchemy Core - Using Set Operations =========================
# union()
from sqlalchemy.sql import union, union_all, except_, intersect

u1 = union(addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
           addresses.select().where(addresses.c.email_add.like('*****@*****.**')))
conn.execute(u1).fetchone()

ua = union_all(addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
               addresses.select().where(addresses.c.email_add.like('*****@*****.**')))
conn.execute(ua).fetchall()

ue = except_(addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
             addresses.select().where(addresses.c.postal_add.like('%Pune')))
result = conn.execute(ue)

ints = intersect(addresses.select().where(addresses.c.email_add.like('*****@*****.**')),
                 addresses.select().where(addresses.c.postal_add.like('%Pune')))
result = conn.execute(ints)

# sum of Fibonacci numbers
a, b = 0, 1
max_value = 4000000
fib_value = []
sum_value = 0
for i in range(100000):
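# A self-contained sketch (in-memory SQLite, made-up rows) of the practical
# difference between the two constructs above: union() removes duplicate rows
# across the combined SELECTs, while union_all() keeps them.
from sqlalchemy import (Column, Integer, MetaData, String, Table, create_engine,
                        insert, select, union, union_all)

engine = create_engine("sqlite://")
metadata = MetaData()
emails = Table("emails", metadata,
               Column("id", Integer, primary_key=True), Column("addr", String))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(emails), [{"addr": "a@example.com"}, {"addr": "b@example.org"}])
    dot_com = select(emails.c.addr).where(emails.c.addr.like("%.com"))
    a_prefix = select(emails.c.addr).where(emails.c.addr.like("a@%"))
    # Both branches match the same row, so union() collapses it and union_all() does not.
    print(conn.execute(union(dot_com, a_prefix)).fetchall())      # [('a@example.com',)]
    print(conn.execute(union_all(dot_com, a_prefix)).fetchall())  # [('a@example.com',), ('a@example.com',)]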
def test_executing(self): # re-create a new INSERT object self.ins = self.users.insert() # execute the insert statement res = self.conn.execute(self.ins, uid=1, name='jack', fullname='Jack Jones') assert(res.inserted_primary_key == [1]) res = self.conn.execute(self.ins, uid=2, name='wendy', fullname='Wendy Williams') assert(res.inserted_primary_key == [2]) # the res variable is a ResultProxy object, analagous to DBAPI cursor # issue many inserts, the same is possible for update and delete self.conn.execute(self.addresses.insert(), [ {'id': 1, 'user_id': 1, 'email_address': '*****@*****.**'}, {'id': 2, 'user_id': 1, 'email_address': '*****@*****.**'}, {'id': 3, 'user_id': 2, 'email_address': '*****@*****.**'}, {'id': 4, 'user_id': 2, 'email_address': '*****@*****.**'} ]) # test selects on the inserted values from sqlalchemy.sql import select s = select([self.users]) res = self.conn.execute(s) u1 = res.fetchone() u2 = res.fetchone() # accessing rows assert(u1['name'] == u'jack') assert(u1['fullname'] == u'Jack Jones') assert(u2['name'] == u'wendy') assert(u2['fullname'] == u'Wendy Williams') assert(u1[1] == u1['name']) assert(u1[2] == u1['fullname']) assert(u2[1] == u2['name']) assert(u2[2] == u2['fullname']) # be sure to close the result set res.close() # use cols to access rows res = self.conn.execute(s) u3 = res.fetchone() u4 = res.fetchone() assert(u3[self.users.c.name] == u1['name']) assert(u3[self.users.c.fullname] == u1['fullname']) assert(u4[self.users.c.name] == u2['name']) assert(u4[self.users.c.fullname] == u2['fullname']) # reference individual columns in select clause s = select([self.users.c.name, self.users.c.fullname]) res = self.conn.execute(s) u3 = res.fetchone() u4 = res.fetchone() assert(u3[self.users.c.name] == u1['name']) assert(u3[self.users.c.fullname] == u1['fullname']) assert(u4[self.users.c.name] == u2['name']) assert(u4[self.users.c.fullname] == u2['fullname']) # test joins # cartesian product usrs = [row for row in self.conn.execute(select([self.users]))] addrs = [row for row in self.conn.execute(select([self.addresses]))] prod = [row for row in self.conn.execute(select([self.users, self.addresses]))] assert(len(prod) == len(usrs) * len(addrs)) # inner join on id s = select([self.users, self.addresses]).where(self.users.c.uid == self.addresses.c.user_id) inner = [row for row in self.conn.execute(s)] assert(len(inner) == 4) # operators between columns objects & other col objects/literals expr = self.users.c.uid == self.addresses.c.user_id assert('my_users.uid = addresses.user_id' == str(expr)) # see how Teradata concats two strings assert(str((self.users.c.name + self.users.c.fullname).compile(bind=self.engine)) == 'my_users.name || my_users.fullname') # built-in conjunctions from sqlalchemy.sql import and_, or_ s = select([(self.users.c.fullname + ", " + self.addresses.c.email_address).label('titles')]).where( and_( self.users.c.uid == self.addresses.c.user_id, self.users.c.name.between('m', 'z'), or_( self.addresses.c.email_address.like('*****@*****.**'), self.addresses.c.email_address.like('*****@*****.**') ) ) ) # print(s) res = self.conn.execute(s) for row in res: assert(str(row[0]) == u'Wendy Williams, [email protected]') # more joins # ON condition auto generated based on ForeignKey assert(str(self.users.join(self.addresses)) == 'my_users JOIN addresses ON my_users.uid = addresses.user_id') # specify the join ON condition self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%')) # select from clause to specify tables and 
the ON condition s = select([self.users.c.fullname]).select_from( self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%'))) res = self.conn.execute(s) assert(len(res.fetchall()) == 3) # left outer joins s = select([self.users.c.fullname]).select_from(self.users.outerjoin(self.addresses)) # outer join works with teradata dialect (unlike oracle dialect < version9) assert(str(s) == str(s.compile(dialect=self.dialect))) # test bind params (positional) from sqlalchemy import text s = self.users.select(self.users.c.name.like( bindparam('username', type_=String)+text("'%'"))) res = self.conn.execute(s, username='******').fetchall() assert(len(res), 1) # functions from sqlalchemy.sql import func, column # certain function names are known by sqlalchemy assert(str(func.current_timestamp()), 'CURRENT_TIMESTAMP') # functions can be used in the select res = self.conn.execute(select( [func.max(self.addresses.c.email_address, type_=String).label( 'max_email')])).scalar() assert(res, '*****@*****.**') # func result sets, define a function taking params x,y return q,z,r # useful for nested queries, subqueries - w/ dynamic params calculate = select([column('q'), column('z'), column('r')]).\ select_from( func.calculate( bindparam('x'), bindparam('y') ) ) calc = calculate.alias() s = select([self.users]).where(self.users.c.uid > calc.c.z) assert('SELECT my_users.uid, my_users.name, my_users.fullname\ FROM my_users, (SELECT q, z, r\ FROM calculate(:x, :y)) AS anon_1\ WHERE my_users.uid > anon_1.z', s) # instantiate the func calc1 = calculate.alias('c1').unique_params(x=17, y=45) calc2 = calculate.alias('c2').unique_params(x=5, y=12) s = select([self.users]).where(self.users.c.uid.between(calc1.c.z, calc2.c.z)) parms = s.compile().params assert('x_2' in parms, 'x_1' in parms) assert('y_2' in parms, 'y_1' in parms) assert(parms['x_1'] == 17, parms['y_1'] == 45) assert(parms['x_2'] == 5, parms['y_2'] == 12) # order by asc stmt = select([self.users.c.name]).order_by(self.users.c.name) res = self.conn.execute(stmt).fetchall() assert('jack' == res[0][0]) assert('wendy' == res[1][0]) # order by desc stmt = select([self.users.c.name]).order_by(self.users.c.name.desc()) res = self.conn.execute(stmt).fetchall() assert('wendy' == res[0][0]) assert('jack' == res[1][0]) # group by stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\ select_from(self.users.join(self.addresses)).\ group_by(self.users.c.name) res = self.conn.execute(stmt).fetchall() assert(res[0][0] == 'jack') assert(res[1][0] == 'wendy') assert(res[0][1] == res[1][1]) # group by having stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\ select_from(self.users.join(self.addresses)).\ group_by(self.users.c.name).\ having(func.length(self.users.c.name) > 4) res = self.conn.execute(stmt).fetchall() assert(res[0] == ('wendy', 2)) # distinct stmt = select([self.users.c.name]).\ where(self.addresses.c.email_address.contains(self.users.c.name)).distinct() res = self.conn.execute(stmt).fetchall() assert(len(res) == 2) assert(res[0][0] != res[1][0]) # limit stmt = select([self.users.c.name, self.addresses.c.email_address]).\ select_from(self.users.join(self.addresses)).\ limit(1) res = self.conn.execute(stmt).fetchall() assert(len(res) == 1) # offset # test union and except from sqlalchemy.sql import except_, union u = union( self.addresses.select().where(self.addresses.c.email_address == '*****@*****.**'), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),)# 
.order_by(self.addresses.c.email_address) # print(u) # #res = self.conn.execute(u) this fails, syntax error order by expects pos integer? u = except_( self.addresses.select().where(self.addresses.c.email_address.like('%@%.com')), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**'))) res = self.conn.execute(u).fetchall() assert(1, len(res)) u = except_( union( self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')) ).alias().select(), self.addresses.select(self.addresses.c.email_address.like('*****@*****.**')) ) res = self.conn.execute(u).fetchall() assert(1, len(res)) # scalar subqueries stmt = select([func.count(self.addresses.c.id)]).where(self.users.c.uid == self.addresses.c.user_id).as_scalar() # we can place stmt as any other column within another select res = self.conn.execute(select([self.users.c.name, stmt])).fetchall() # res is a list of tuples, one tuple per user's name assert(2, len(res)) u1 = res[0] u2 = res[1] assert(len(u1) == len(u2)) assert(u1[0] == u'jack') assert(u1[1] == u2[1]) assert(u2[0] == u'wendy') # we can label the inner query stmt = select([func.count(self.addresses.c.id)]).\ where(self.users.c.uid == self.addresses.c.user_id).\ label("address_count") res = self.conn.execute(select([self.users.c.name, stmt])).fetchall() assert(2, len(res)) u1 = res[0] u2 = res[1] assert(len(u1) == 2) assert(len(u2) == 2) # inserts, updates, deletes stmt = self.users.update().values(fullname="Fullname: " + self.users.c.name) res = self.conn.execute(stmt) assert('name_1' in res.last_updated_params()) assert(res.last_updated_params()['name_1'] == 'Fullname: ') stmt = self.users.insert().values(name=bindparam('_name') + " .. name") res = self.conn.execute(stmt, [{'uid': 4, '_name': 'name1'}, {'uid': 5, '_name': 'name2'}, {'uid': 6, '_name': 'name3'}, ]) # updates stmt = self.users.update().where(self.users.c.name == 'jack').values(name='ed') res = self.conn.execute(stmt) assert(res.rowcount == 1) assert(res.returns_rows is False) # update many with bound params stmt = self.users.update().where(self.users.c.name == bindparam('oldname')).\ values(name=bindparam('newname')) res = self.conn.execute(stmt, [ {'oldname': 'jack', 'newname': 'ed'}, {'oldname': 'wendy', 'newname': 'mary'}, ]) assert(res.returns_rows is False) assert(res.rowcount == 1) res = self.conn.execute(select([self.users]).where(self.users.c.name == 'ed')) r = res.fetchone() assert(r['name'] == 'ed') # correlated updates stmt = select([self.addresses.c.email_address]).\ where(self.addresses.c.user_id == self.users.c.uid).\ limit(1) # this fails, syntax error bc of LIMIT - need TOP/SAMPLE instead # Note: TOP can't be in a subquery # res = self.conn.execute(self.users.update().values(fullname=stmt)) # multiple table updates stmt = self.users.update().\ values(name='ed wood').\ where(self.users.c.uid == self.addresses.c.id).\ where(self.addresses.c.email_address.startswith('ed%')) # this fails, teradata does update from set where not update set from where # #res = self.conn.execute(stmt) stmt = self.users.update().\ values({ self.users.c.name: 'ed wood', self.addresses.c.email_address: '*****@*****.**' }).\ where(self.users.c.uid == self.addresses.c.id).\ where(self.addresses.c.email_address.startswith('ed%')) # fails but works on MySQL, should this work for us? 
# #res = self.conn.execute(stmt) # deletes self.conn.execute(self.addresses.delete()) self.conn.execute(self.users.delete().where(self.users.c.name > 'm')) # matched row counts # updates + deletes have a number indicating # rows matched by WHERE clause res = self.conn.execute(self.users.delete()) assert(res.rowcount == 1)
def affected_by(dep_name): """ Display which packages are possibly affected by given dependency change. """ if len(g.current_collections) != 1: abort(400) collection = g.current_collections[0] try: evr1 = RpmEVR( int(request.args['epoch1']), request.args['version1'], request.args['release1'] ) evr2 = RpmEVR( int(request.args['epoch2']), request.args['version2'], request.args['release2'] ) except (KeyError, ValueError): abort(400) # Dependencies in the evr1 to evr2 interval # Note that evr comparisons are overloaded custom comparators that invoke RPM-correct # comparisons implemented in rpmvercmp.sql deps_in = ( db.query(Dependency.id) .filter(Dependency.name == dep_name) .filter(Dependency.evr > evr1) .filter(Dependency.evr < evr2) .cte('deps_in') ) # Dependencies with greater evr than evr2 deps_higher = ( db.query(Dependency.id) .filter(Dependency.name == dep_name) .filter(Dependency.evr >= evr2) .cte('deps_higher') ) # Dependencies with lesser evr than evr1 deps_lower = ( db.query(Dependency.id) .filter(Dependency.name == dep_name) .filter(Dependency.evr <= evr1) .cte('deps_lower') ) # Get only changes where the prev_evr to curr_evr interval overlaps with evr1 to evr2 filtered_changes = union( # Changes with previous evr in the evr1 to evr2 interval db.query(AppliedChange) .filter(AppliedChange.prev_dep_id.in_(db.query(deps_in))), # Changes with current evr in the evr1 to evr2 interval db.query(AppliedChange) .filter(AppliedChange.curr_dep_id.in_(db.query(deps_in))), # Changes with both evrs "around" the evr1 to evr2 interval db.query(AppliedChange) .filter( (AppliedChange.prev_dep_id.in_(db.query(deps_lower))) & (AppliedChange.curr_dep_id.in_(db.query(deps_higher))) ), ).alias('filtered_changes') prev_build = aliased(Build) # Get a subquery for previous build state subq = db.query(prev_build.state.label('prev_state'))\ .order_by(prev_build.started.desc())\ .filter(prev_build.started < Build.started)\ .filter(prev_build.package_id == Build.package_id)\ .limit(1)\ .correlate().as_scalar() prev_dep = aliased(Dependency) curr_dep = aliased(Dependency) failed = ( db.query( prev_dep.name.label('dep_name'), prev_dep.evr.label('prev_evr'), curr_dep.evr.label('curr_evr'), AppliedChange.distance, Build.id.label('build_id'), Build.state.label('build_state'), Build.started.label('build_started'), Package.name.label('package_name'), Package.resolved.label('package_resolved'), Package.last_complete_build_state.label('package_lb_state'), subq.label('prev_build_state'), ) .select_entity_from(filtered_changes) .join(prev_dep, AppliedChange.prev_dep) .join(curr_dep, AppliedChange.curr_dep) .join(AppliedChange.build).join(Build.package) .filter_by(blocked=False, tracked=True, collection_id=collection.id) # Show only packages where the build after failed, but the previous one was ok .filter(Build.state == Build.FAILED) .filter(subq != Build.FAILED) .order_by(AppliedChange.distance, Build.started.desc()) .all() ) # Auxiliary function to compute state string for the query row def package_state(row): return Package( tracked=True, blocked=False, resolved=row.package_resolved, last_complete_build_state=row.package_lb_state, ).state_string return render_template("affected-by.html", package_state=package_state, dep_name=dep_name, evr1=evr1, evr2=evr2, collection=collection, failed=failed)
def base_props_selectable(self):
    invites = session.query(Invite.id, Invite.person, Invite.vote_result,
                            literal("invite").label("type"), Invite.active)
    kicks = session.query(Kick.id, User.name, Kick.vote_result,
                          literal("kick").label("type"), Kick.active).join(Kick.kicked)
    props = union(invites, kicks).alias("prop")
    return props
select([user, address])

# explicitly join
select([user.c.name]).select_from(user.join(address))

# join on foreign keys if only one relationship exists
user.join(address)

# specify on clause
user.join(address, address.c.user_id > 5)

# set operations ###############################################
u = union(address.select().where(),
          address.select().where(),
          ).order_by(address.c.email_address)

# result #######################################################
# The result, known as a ResultProxy object, is analogous to the
# DBAPI cursor object.
result3 = con.execute(sel)
result.fetchone()
result.fetchall()
for row in result:
    print(row)
    print(row['name'], row['email'])
print(conn.execute(enclosing_s).fetchall())

s = select([users.c.id]).\
    where(users.c.id == addresses.c.user_id).\
    where(users.c.name == 'jack').\
    correlate(addresses)
enclosing_s = select(
    [users.c.name, addresses.c.email_address]).\
    select_from(users.join(addresses)).\
    where(users.c.id == s)
print(conn.execute(enclosing_s).fetchall())

s = select([users.c.id]).\
    where(users.c.id == addresses.c.user_id).\
    where(users.c.name == 'jack').\
    correlate_except(users)
enclosing_s = select(
    [users.c.name, addresses.c.email_address]).\
    select_from(users.join(addresses)).\
    where(users.c.id == s)
print(conn.execute(enclosing_s).fetchall())

s = union(
    addresses.select().where(addresses.c.email_address == '*****@*****.**'),
    addresses.select().where(
        addresses.c.email_address.like('*****@*****.**')),
).order_by('email_address')
print(conn.execute(s).fetchall())

conn.close()
def test_executing(self): # re-create a new INSERT object self.ins = self.users.insert() # execute the insert statement res = self.conn.execute(self.ins, uid=1, name='jack', fullname='Jack Jones') assert(res.inserted_primary_key == [1]) res = self.conn.execute(self.ins, uid=2, name='wendy', fullname='Wendy Williams') assert(res.inserted_primary_key == [2]) # the res variable is a ResultProxy object, analagous to DBAPI cursor # issue many inserts, the same is possible for update and delete self.conn.execute(self.addresses.insert(), [ {'id': 1, 'user_id': 1, 'email_address': '*****@*****.**'}, {'id': 2, 'user_id': 1, 'email_address': '*****@*****.**'}, {'id': 3, 'user_id': 2, 'email_address': '*****@*****.**'}, {'id': 4, 'user_id': 2, 'email_address': '*****@*****.**'} ]) # test selects on the inserted values from sqlalchemy.sql import select s = select([self.users]) res = self.conn.execute(s) u1 = res.fetchone() u2 = res.fetchone() # accessing rows assert(u1['name'] == u'jack') assert(u1['fullname'] == u'Jack Jones') assert(u2['name'] == u'wendy') assert(u2['fullname'] == u'Wendy Williams') assert(u1[1] == u1['name']) assert(u1[2] == u1['fullname']) assert(u2[1] == u2['name']) assert(u2[2] == u2['fullname']) # be sure to close the result set res.close() # use cols to access rows res = self.conn.execute(s) u3 = res.fetchone() u4 = res.fetchone() assert(u3[self.users.c.name] == u1['name']) assert(u3[self.users.c.fullname] == u1['fullname']) assert(u4[self.users.c.name] == u2['name']) assert(u4[self.users.c.fullname] == u2['fullname']) # reference individual columns in select clause s = select([self.users.c.name, self.users.c.fullname]) res = self.conn.execute(s) u3 = res.fetchone() u4 = res.fetchone() assert(u3[self.users.c.name] == u1['name']) assert(u3[self.users.c.fullname] == u1['fullname']) assert(u4[self.users.c.name] == u2['name']) assert(u4[self.users.c.fullname] == u2['fullname']) # test joins # cartesian product usrs = [row for row in self.conn.execute(select([self.users]))] addrs = [row for row in self.conn.execute(select([self.addresses]))] prod = [row for row in self.conn.execute(select([self.users, self.addresses]))] assert(len(prod) == len(usrs) * len(addrs)) # inner join on id s = select([self.users, self.addresses]).where(self.users.c.uid == self.addresses.c.user_id) inner = [row for row in self.conn.execute(s)] assert(len(inner) == 4) # operators between columns objects & other col objects/literals expr = self.users.c.uid == self.addresses.c.user_id assert('my_users.uid = addresses.user_id' == str(expr)) # see how Teradata concats two strings assert(str((self.users.c.name + self.users.c.fullname).compile(bind=self.engine)) == 'my_users.name || my_users.fullname') # built-in conjunctions from sqlalchemy.sql import and_, or_ s = select([(self.users.c.fullname + ", " + self.addresses.c.email_address).label('titles')]).where( and_( self.users.c.uid == self.addresses.c.user_id, self.users.c.name.between('m', 'z'), or_( self.addresses.c.email_address.like('*****@*****.**'), self.addresses.c.email_address.like('*****@*****.**') ) ) ) # print(s) res = self.conn.execute(s) for row in res: assert(str(row[0]) == u'Wendy Williams, [email protected]') # more joins # ON condition auto generated based on ForeignKey assert(str(self.users.join(self.addresses)) == 'my_users JOIN addresses ON my_users.uid = addresses.user_id') # specify the join ON condition self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%')) # select from clause to specify tables and 
the ON condition s = select([self.users.c.fullname]).select_from( self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%'))) res = self.conn.execute(s) assert(len(res.fetchall()) == 3) # left outer joins s = select([self.users.c.fullname]).select_from(self.users.outerjoin(self.addresses)) # outer join works with teradata dialect (unlike oracle dialect < version9) assert(str(s) == str(s.compile(dialect=self.dialect))) # test bind params (positional) from sqlalchemy import text s = self.users.select(self.users.c.name.like( bindparam('username', type_=String)+text("'%'"))) res = self.conn.execute(s, username='******').fetchall() assert(len(res), 1) # functions from sqlalchemy.sql import func, column # certain function names are known by sqlalchemy assert(str(func.current_timestamp()), 'CURRENT_TIMESTAMP') # functions can be used in the select res = self.conn.execute(select( [func.max(self.addresses.c.email_address, type_=String).label( 'max_email')])).scalar() assert(res, '*****@*****.**') # func result sets, define a function taking params x,y return q,z,r # useful for nested queries, subqueries - w/ dynamic params calculate = select([column('q'), column('z'), column('r')]).\ select_from( func.calculate( bindparam('x'), bindparam('y') ) ) calc = calculate.alias() s = select([self.users]).where(self.users.c.uid > calc.c.z) assert('SELECT my_users.uid, my_users.name, my_users.fullname\ FROM my_users, (SELECT q, z, r\ FROM calculate(:x, :y)) AS anon_1\ WHERE my_users.uid > anon_1.z', s) # instantiate the func calc1 = calculate.alias('c1').unique_params(x=17, y=45) calc2 = calculate.alias('c2').unique_params(x=5, y=12) s = select([self.users]).where(self.users.c.uid.between(calc1.c.z, calc2.c.z)) parms = s.compile().params assert('x_2' in parms, 'x_1' in parms) assert('y_2' in parms, 'y_1' in parms) assert(parms['x_1'] == 17, parms['y_1'] == 45) assert(parms['x_2'] == 5, parms['y_2'] == 12) # order by asc stmt = select([self.users.c.name]).order_by(self.users.c.name) res = self.conn.execute(stmt).fetchall() assert('jack' == res[0][0]) assert('wendy' == res[1][0]) # order by desc stmt = select([self.users.c.name]).order_by(self.users.c.name.desc()) res = self.conn.execute(stmt).fetchall() assert('wendy' == res[0][0]) assert('jack' == res[1][0]) # group by stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\ select_from(self.users.join(self.addresses)).\ group_by(self.users.c.name) res = self.conn.execute(stmt).fetchall() assert(res[1][0] == 'jack') assert(res[0][0] == 'wendy') assert(res[0][1] == res[1][1]) # group by having stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\ select_from(self.users.join(self.addresses)).\ group_by(self.users.c.name).\ having(func.length(self.users.c.name) > 4) res = self.conn.execute(stmt).fetchall() assert(res[0] == ('wendy', 2)) # distinct stmt = select([self.users.c.name]).\ where(self.addresses.c.email_address.contains(self.users.c.name)).distinct() res = self.conn.execute(stmt).fetchall() assert(len(res) == 2) assert(res[0][0] != res[1][0]) # limit stmt = select([self.users.c.name, self.addresses.c.email_address]).\ select_from(self.users.join(self.addresses)).\ limit(1) res = self.conn.execute(stmt).fetchall() assert(len(res) == 1) # offset # test union and except from sqlalchemy.sql import except_, union u = union( self.addresses.select().where(self.addresses.c.email_address == '*****@*****.**'), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),)# 
.order_by(self.addresses.c.email_address) # print(u) # #res = self.conn.execute(u) this fails, syntax error order by expects pos integer? u = except_( self.addresses.select().where(self.addresses.c.email_address.like('%@%.com')), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**'))) res = self.conn.execute(u).fetchall() assert(1, len(res)) u = except_( union( self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')), self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')) ).alias().select(), self.addresses.select(self.addresses.c.email_address.like('*****@*****.**')) ) res = self.conn.execute(u).fetchall() assert(1, len(res)) # scalar subqueries stmt = select([func.count(self.addresses.c.id)]).where(self.users.c.uid == self.addresses.c.user_id).as_scalar() # we can place stmt as any other column within another select res = self.conn.execute(select([self.users.c.name, stmt])).fetchall() # res is a list of tuples, one tuple per user's name assert(2, len(res)) u1 = res[0] u2 = res[1] assert(len(u1) == len(u2)) assert(u1[0] == u'jack') assert(u1[1] == u2[1]) assert(u2[0] == u'wendy') # we can label the inner query stmt = select([func.count(self.addresses.c.id)]).\ where(self.users.c.uid == self.addresses.c.user_id).\ label("address_count") res = self.conn.execute(select([self.users.c.name, stmt])).fetchall() assert(2, len(res)) u1 = res[0] u2 = res[1] assert(len(u1) == 2) assert(len(u2) == 2) # inserts, updates, deletes stmt = self.users.update().values(fullname="Fullname: " + self.users.c.name) res = self.conn.execute(stmt) assert('name_1' in res.last_updated_params()) assert(res.last_updated_params()['name_1'] == 'Fullname: ') stmt = self.users.insert().values(name=bindparam('_name') + " .. name") res = self.conn.execute(stmt, [{'uid': 4, '_name': 'name1'}, {'uid': 5, '_name': 'name2'}, {'uid': 6, '_name': 'name3'}, ]) # updates stmt = self.users.update().where(self.users.c.name == 'jack').values(name='ed') res = self.conn.execute(stmt) assert(res.rowcount == 1) assert(res.returns_rows is False) # update many with bound params stmt = self.users.update().where(self.users.c.name == bindparam('oldname')).\ values(name=bindparam('newname')) res = self.conn.execute(stmt, [ {'oldname': 'jack', 'newname': 'ed'}, {'oldname': 'wendy', 'newname': 'mary'}, ]) assert(res.returns_rows is False) assert(res.rowcount == 1) res = self.conn.execute(select([self.users]).where(self.users.c.name == 'ed')) r = res.fetchone() assert(r['name'] == 'ed') # correlated updates stmt = select([self.addresses.c.email_address]).\ where(self.addresses.c.user_id == self.users.c.uid).\ limit(1) # this fails, syntax error bc of LIMIT - need TOP/SAMPLE instead # Note: TOP can't be in a subquery # res = self.conn.execute(self.users.update().values(fullname=stmt)) # multiple table updates stmt = self.users.update().\ values(name='ed wood').\ where(self.users.c.uid == self.addresses.c.id).\ where(self.addresses.c.email_address.startswith('ed%')) # this fails, teradata does update from set where not update set from where # #res = self.conn.execute(stmt) stmt = self.users.update().\ values({ self.users.c.name: 'ed wood', self.addresses.c.email_address: '*****@*****.**' }).\ where(self.users.c.uid == self.addresses.c.id).\ where(self.addresses.c.email_address.startswith('ed%')) # fails but works on MySQL, should this work for us? 
# #res = self.conn.execute(stmt) # deletes self.conn.execute(self.addresses.delete()) self.conn.execute(self.users.delete().where(self.users.c.name > 'm')) # matched row counts # updates + deletes have a number indicating # rows matched by WHERE clause res = self.conn.execute(self.users.delete()) assert(res.rowcount == 1)
def get(self):
    ''''''
    parser = RequestParser(trim=True)
    parser.add_argument('page', type=int, default=DEFAULT_PAGE)
    parser.add_argument('pageSize', type=int, default=DEFAULT_PAGE_SIZE)
    parser.add_argument('timeLower', type=int)
    parser.add_argument('timeUpper', type=int)
    args = parser.parse_args(strict=True)

    result_BlastBets_set = set()
    result_BlastBets_set.add(BlastBets.state == 2)
    result_BlastBetsCredit_set = set()
    result_BlastBetsCredit_set.add(BlastBetsCredit.state == 2)
    result_EntertainmentCityBetsDetail_set = set()
    result_EntertainmentCityBetsDetail_set.add(EntertainmentCityBetsDetail.Flag == 1)

    if args['timeLower']:
        result_BlastBets_set.add(BlastBets.actionTime >= args['timeLower'])
        result_BlastBetsCredit_set.add(BlastBetsCredit.betTime >= args['timeLower'])
        result_EntertainmentCityBetsDetail_set.add(
            EntertainmentCityBetsDetail.BetTime >= args['timeLower'])
    if args['timeUpper']:
        result_BlastBets_set.add(
            BlastBets.actionTime <= args['timeUpper'] + SECONDS_PER_DAY)
        result_BlastBetsCredit_set.add(
            BlastBetsCredit.betTime <= args['timeUpper'] + SECONDS_PER_DAY)
        result_EntertainmentCityBetsDetail_set.add(
            EntertainmentCityBetsDetail.BetTime <= args['timeUpper'] + SECONDS_PER_DAY)

    # Query the blast_bet table
    result_BlastBets = db.session.query(
        BlastBets.username.label('username'),
        func.sum(BlastBets.mode * BlastBets.beiShu * BlastBets.actionNum).label('betAmount'),
        BlastBets.state.label('state')).group_by(
            BlastBets.username).filter(*result_BlastBets_set).subquery()

    # Query the tb_bets_credit table
    result_BlastBetsCredit = db.session.query(
        BlastBetsCredit.memberUsername.label('username'),
        func.sum(BlastBetsCredit.betAmount).label('betAmount'),
        BlastBetsCredit.state.label('state')).group_by(
            BlastBetsCredit.memberUsername).filter(
                *result_BlastBetsCredit_set).subquery()

    # Query the tb_entertainment_city_bets_detail table
    result_EntertainmentCityBetsDetail = db.session.query(
        EntertainmentCityBetsDetail.PlayerName.label('username'),
        EntertainmentCityBetsDetail.ECCode.label('ECCode'),
        EntertainmentCityBetsDetail.childType.label('childType'),
        func.sum(EntertainmentCityBetsDetail.BetAmount).label('betAmount'),
        EntertainmentCityBetsDetail.Flag.label('state'),
    ).group_by(
        EntertainmentCityBetsDetail.PlayerName,
        EntertainmentCityBetsDetail.ECCode,
        EntertainmentCityBetsDetail.childType,
    ).filter(*result_EntertainmentCityBetsDetail_set).all()

    # Combined query over blast_bet and tb_bets_credit
    result_BB_left_l = db.session.query(
        result_BlastBets.c.username.label('result_BlastBets_username'),
        result_BlastBets.c.betAmount.label('result_BlastBets_betAmount'),
        result_BlastBets.c.state.label('result_BlastBets_state'),
        result_BlastBetsCredit.c.username.label('result_BlastBetsCredit_username'),
        result_BlastBetsCredit.c.betAmount.label('result_BlastBetsCredit_betAmount'),
        result_BlastBetsCredit.c.state.label('result_BlastBetsCredit_state'),
    )
    result_BB_left_l = result_BB_left_l.outerjoin(
        result_BlastBetsCredit,
        result_BlastBetsCredit.c.username == result_BlastBets.c.username)

    result_BBC_right_l = db.session.query(
        result_BlastBets.c.username.label('result_BlastBets_username'),
        result_BlastBets.c.betAmount.label('result_BlastBets_betAmount'),
        result_BlastBets.c.state.label('result_BlastBets_state'),
        result_BlastBetsCredit.c.username.label('result_BlastBetsCredit_username'),
        result_BlastBetsCredit.c.betAmount.label('result_BlastBetsCredit_betAmount'),
        result_BlastBetsCredit.c.state.label('result_BlastBetsCredit_state'))
    result_BBC_right_l = result_BBC_right_l.outerjoin(
        result_BlastBets,
        result_BlastBets.c.username == result_BlastBetsCredit.c.username)

    result_all_1 = union(result_BB_left_l, result_BBC_right_l)
    # a = execute(result_all_1)
    # print(a)
    user_alias = aliased(result_all_1, name='user_alias')
    user_alias = db.session.query(user_alias).order_by().all()
    pass
else:
    for row in conn.execute(s):
        print row

### union
from sqlalchemy.sql import union

# The select() clauses below are much more complicated than the ones in the
# original example, because we want to support MySQL, in which case the
# ORDER BY clause does not accept forms like table-name.column-name. Instead,
# an alias has to be created via label(). See more:
# http://dev.mysql.com/doc/refman/5.6/en/union.html
u = union(
    select([addresses.c.id, addresses.c.user_id,
            addresses.c.email_address.label('eaddr')]).where(
                addresses.c.email_address == '*****@*****.**'),
    select([addresses.c.id, addresses.c.user_id,
            addresses.c.email_address]).where(
                addresses.c.email_address.like('*****@*****.**'))).order_by('eaddr')
print u
for row in conn.execute(u):
    print row

from sqlalchemy.sql import except_

# MySQL does not support EXCEPT.
if not re.match("mysql://", db):
    u = except_(
        addresses.select().where(addresses.c.email_address.like('%@%.com')),
        addresses.select().where(addresses.c.email_address.like('*****@*****.**'))
    )
    print u