def build_comparisons(s, ast, with_conversion):
    qname, op, value = ast
    owner, name, group = StatisticName.parse(qname, default_activity_group=UNDEF)
    if value is None:
        if op == '=':
            return get_source_ids_for_null(s, owner, name, group, with_conversion), True
        else:
            return aliased(
                union(*[get_source_ids(s, owner, name, op, value, group, type)
                        for type in StatisticJournalType
                        if type != StatisticJournalType.STATISTIC])).select(), False
    elif isinstance(value, str):
        return get_source_ids(s, owner, name, op, value, group, StatisticJournalType.TEXT), False
    elif isinstance(value, dt.datetime):
        return get_source_ids(s, owner, name, op, value, group, StatisticJournalType.TIMESTAMP), False
    else:
        qint = get_source_ids(s, owner, name, op, value, group, StatisticJournalType.INTEGER)
        qfloat = get_source_ids(s, owner, name, op, value, group, StatisticJournalType.FLOAT)
        return aliased(union(qint, qfloat)).select(), False

def eval(self, builder):
    """ return a query object """
    dbh = builder._get_dbh()
    tokens = self.value[1:]
    eval_1 = self.value[0].eval(builder)
    while tokens:
        op = tokens[0]
        eval_2 = tokens[1].eval(builder)
        tokens = tokens[2:]
        if op == '|':
            eval_1 = sqla.union(eval_1, eval_2)
        elif op == '&':
            eval_1 = sqla.intersect(eval_1, eval_2)
        elif op == ':':
            eval_1 = sqla.except_(
                dbh.session().query(dbh.Sample.id).filter(
                    dbh.Sample.id.in_(sqla.union(eval_1, eval_2))),
                dbh.session().query(dbh.Sample.id).filter(
                    dbh.Sample.id.in_(sqla.intersect(eval_1, eval_2))))
    q = dbh.session().query(dbh.Sample.id).filter(dbh.Sample.id.in_(eval_1))
    return q

def test_sqlalchemy(self):
    engine = create_engine('sqlite:///:memory:')
    Base = declarative_base()
    Session = sessionmaker(engine)

    class Table(Base):
        __tablename__ = 'table'
        id = Column(Integer, primary_key=True)

    Base.metadata.create_all(engine)
    with Session() as s:
        q1 = select(Table).filter(Table.id == 1)
        q2 = select(Table).filter(Table.id == 2)
        q3 = union(q1, q2).subquery()
    with Session() as s:
        q1 = select(Table).filter(Table.id == 1)
        q2 = select(Table).filter(Table.id == 2)
        cte = union(q1, q2).cte()
        q3 = select(Table).filter(Table.id.in_(cte))
        q4 = select(Table).filter(Table.id.in_(cte))
        q5 = union(q3, q4).subquery()
        log.debug(q5)

def test_intersect_unions_2(self):
    u = intersect(
        union(select([t1.c.col3, t1.c.col4]),
              select([t3.c.col3, t3.c.col4])).alias().select(),
        union(select([t2.c.col3, t2.c.col4]),
              select([t3.c.col3, t3.c.col4])).alias().select(),
    )
    wanted = [("aaa", "ccc"), ("bbb", "aaa"), ("ccc", "bbb")]
    found = self._fetchall_sorted(u.execute())
    eq_(found, wanted)

def test_intersect_unions_2(self):
    u = intersect(
        union(
            select([t1.c.col3, t1.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select(),
        union(
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select())
    wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
    found = self._fetchall_sorted(u.execute())
    eq_(found, wanted)

def test_intersect_unions_2(self):
    u = intersect(
        union(
            select([t1.c.col3, t1.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select(),
        union(
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select()
    )
    wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
    found = self._fetchall_sorted(u.execute())
    eq_(found, wanted)

def timeseries_all(cls, table_names, agg_unit, start, end, geom=None):
    # For each table in table_names, generate a query to be unioned
    selects = []
    for name in table_names:
        table = cls.get_by_dataset_name(name)
        ts_select = table.timeseries(agg_unit, start, end, geom)
        selects.append(ts_select)

    # Union the time series selects to get a panel
    panel_query = sa.union(*selects)\
        .order_by('dataset_name')\
        .order_by('time_bucket')
    panel_vals = session.execute(panel_query)

    panel = []
    for dataset_name, ts in groupby(panel_vals, lambda row: row.dataset_name):
        # ts gets closed after it's been iterated over once,
        # so we need to store the rows somewhere to iterate over them twice.
        rows = [row for row in ts]
        # If no records were found, don't include this dataset
        if all([row.count == 0 for row in rows]):
            continue

        ts_dict = {'dataset_name': dataset_name, 'items': []}
        for row in rows:
            ts_dict['items'].append({
                'datetime': row.time_bucket.date(),  # UTC time
                'count': row.count
            })
        panel.append(ts_dict)

    return panel

def test_union(self):
    t1 = table(
        't1', column('col1'), column('col2'), column('col3'), column('col4'))
    t2 = table(
        't2', column('col1'), column('col2'), column('col3'), column('col4'))

    s1, s2 = select(
        [t1.c.col3.label('col3'), t1.c.col4.label('col4')],
        t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
        select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
               t2.c.col2.in_(['t2col2r2', 't2col2r3']))
    u = union(s1, s2, order_by=['col3', 'col4'])

    self.assert_compile(u,
                        'SELECT t1.col3 AS col3, t1.col4 AS col4 '
                        'FROM t1 WHERE t1.col2 IN (:col2_1, '
                        ':col2_2) UNION SELECT t2.col3 AS col3, '
                        't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
                        '(:col2_3, :col2_4) ORDER BY col3, col4')
    self.assert_compile(u.alias('bar').select(),
                        'SELECT bar.col3, bar.col4 FROM (SELECT '
                        't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
                        'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
                        'SELECT t2.col3 AS col3, t2.col4 AS col4 '
                        'FROM t2 WHERE t2.col2 IN (:col2_3, '
                        ':col2_4)) AS bar')

def main():
    db = define.connect()
    all_media = sa.union(
        sa.select([orm.UserMediaLink.mediaid]),
        sa.select([orm.SubmissionMediaLink.mediaid]),
        sa.select([orm.MediaMediaLink.describee_id]),
        sa.select([orm.MediaMediaLink.described_with_id]),
    ).alias('all_media')
    q = (
        db.query(orm.MediaItem)
        .with_polymorphic([orm.DiskMediaItem])
        .outerjoin(all_media)
        .filter(all_media.c.mediaid == None))
    count = q.count()
    for e, media_item in enumerate(q, start=1):
        sys.stdout.write('\r%d/%d' % (e, count))
        sys.stdout.flush()
        db.delete(media_item)
        try:
            os.unlink(media_item.full_file_path)
        except OSError as e:
            if e.errno == errno.ENOENT:
                continue
            raise
    db.flush()
    print

def getTokenUserFunds(request, token):
    """Get the sum of a token for all users

    Arguments:
        request {[type]} -- [description]
        token {[type]} -- [description]

    Returns:
        [type] -- [description]
    """
    users_r = request.dbsession.query(
        Funds.user_id.label('user_id'),
        func.sum(Funds.value).label("total")).filter(
            Funds.token == token).group_by(Funds.user_id)
    users_s = request.dbsession.query(
        Funds.sender_id.label('user_id'),
        func.sum(-Funds.value).label("total")).filter(
            Funds.sender_id.isnot(None)).filter(Funds.token == token).group_by(
                Funds.sender_id)
    users_u = union(users_r, users_s).alias('total')
    return request.dbsession.query(
        users_u.columns.user_id,
        func.sum(users_u.columns.total).label('total')).group_by(
            users_u.columns.user_id)

def query(cls):
    def select_1():
        stmt = sqlalchemy.text("select * from pytis_view_user_menu()")
        u = stmt.columns(
            sqlalchemy.Column('menuid', sqlalchemy.Integer()),
        ).alias('u')
        h = sql.t.EvPytisHelp.alias('h')
        return sqlalchemy.select(
            sql.reorder_columns(cls._exclude(h), [
                'help_id', 'menuid', 'fullname', 'title', 'description',
                'menu_help', 'spec_name', 'spec_description', 'spec_help',
                'page_id', 'parent', 'ord', 'content', 'position',
                'position_nsub', 'changed', 'removed'
            ]),
            from_obj=[h.join(u, h.c.menuid == u.c.menuid)])

    def select_2():
        h = sql.t.EvPytisHelp.alias('h')
        return sqlalchemy.select(
            sql.reorder_columns(cls._exclude(h), [
                'help_id', 'menuid', 'fullname', 'title', 'description',
                'menu_help', 'spec_name', 'spec_description', 'spec_help',
                'page_id', 'parent', 'ord', 'content', 'position',
                'position_nsub', 'changed', 'removed'
            ]),
            from_obj=[h],
            whereclause=h.c.menuid.is_(None),
        )

    return sqlalchemy.union(select_1(), select_2())

def test_union_label(self):
    s1 = select([func.foo("hoho").label('x')])
    s2 = select([func.foo("Bar").label('y')])
    stmt = union(s1, s2).order_by("x")
    self.assert_compile(
        stmt,
        "SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x")

def update(self, engine):
    with engine.begin() as conn:
        # delete removed relations
        conn.execute(self.delete(self.rels.select_delete()))

        # collect all changed relations in a temporary table
        # 1. relations added or modified
        sels = [sa.select([self.rels.cc.id])]
        # 2. relations with modified geometries
        w = self.ways
        sels.append(
            sa.select([saf.func.unnest(w.c.rels).label('id')],
                      distinct=True).where(w.c.id.in_(w.select_add_modify())))

        conn.execute(
            'DROP TABLE IF EXISTS __tmp_osgende_routes_updaterels')
        conn.execute(
            CreateTableAs('__tmp_osgende_routes_updaterels',
                          sa.union(*sels), temporary=False))
        tmp_rels = sa.Table('__tmp_osgende_routes_updaterels',
                            sa.MetaData(), autoload_with=conn)

        # 3. parent relation of all of them
        conn.execute(tmp_rels.insert().from_select(
            tmp_rels.c,
            sa.select([self.rtree.c.parent], distinct=True).where(
                self.rtree.c.child.in_(sa.select([tmp_rels.c.id])))))

        # and insert/update all
        self._insert_objects(conn, self.rels.c.id.in_(tmp_rels.select()))

        tmp_rels.drop(conn)

def get_published_briefs(self):
    team_brief_query = (db.session.query(
        TeamBrief.brief_id.label('brief_id'),
        func.array_agg(func.substring(
            User.email_address, '@(.*)')).label('domain')).join(Team).join(User).filter(
                Team.status == 'completed').group_by(TeamBrief.brief_id))

    brief_user_query = (db.session.query(
        BriefUser.brief_id.label('brief_id'),
        func.array_agg(
            func.substring(User.email_address, '@(.*)')).label('domain')).join(User).group_by(
                BriefUser.brief_id))

    subquery = union(team_brief_query, brief_user_query).alias('result')

    result = (db.session.query(
        Brief.id,
        Brief.data['organisation'].astext.label('organisation'),
        Brief.published_at,
        Brief.withdrawn_at,
        Brief.data['title'].astext.label('title'),
        Brief.data['sellerSelector'].astext.label('openTo'),
        Brief.data['areaOfExpertise'].astext.label('brief_category'),
        Lot.name.label('brief_type'),
        subquery.columns.domain[1].label('publisher_domain')).join(
            subquery, Brief.id == subquery.columns.brief_id).join(Lot).filter(
                Brief.published_at.isnot(None)).order_by(Brief.id).all())

    return [r._asdict() for r in result]

def determine_fetches(db_session, cred):
    for thread in db_session.query(Thread).filter_by(closed=False):
        update_thread_status(thread, cred)
    db_session.flush()

    incomplete_page_ids = (
        sa.select([ThreadPost.page_id])
        .group_by(ThreadPost.page_id)
        .having(sa.func.count(ThreadPost.id) < 40)
        .as_scalar()
    )
    incomplete_pages = sa.select(
        [ThreadPage.thread_id, ThreadPage.page_num],
        from_obj=sa.join(ThreadPage, Thread)
    ).where(sa.and_(ThreadPage.id.in_(incomplete_page_ids),
                    Thread.closed == sa.false()))

    fetch_status = (
        sa.select(
            [ThreadPage.thread_id.label("thread_id"),
             sa.func.max(ThreadPage.page_num).label("last_fetched_page")]
        )
        .group_by(ThreadPage.thread_id)
        .alias("fetch_status")
    )
    unfetched_pages = sa.select(
        [
            Thread.id.label("thread_id"),
            sa.func.generate_series(fetch_status.c.last_fetched_page + 1,
                                    Thread.page_count).label("page_num"),
        ],
        from_obj=sa.join(Thread, fetch_status,
                         Thread.id == fetch_status.c.thread_id),
    )

    fetched_first_pages = sa.select([ThreadPage.thread_id]).where(
        ThreadPage.page_num == 1).as_scalar()
    unfetched_first_pages = sa.select(
        [Thread.id.label("thread_id"),
         sa.literal(1, sa.Integer).label("page_num")],
        from_obj=Thread
    ).where(Thread.id.notin_(fetched_first_pages))

    q = sa.union(incomplete_pages, unfetched_pages, unfetched_first_pages)
    q = q.order_by(q.c.thread_id.asc(), q.c.page_num.asc())
    return db_session.execute(q).fetchall()

def query(cls):
    def select_1():
        h = sql.t.EvPytisHelp.alias("h")
        u = sqlalchemy.select(["*"], from_obj=["pytis_view_user_menu()"]).alias("u")
        return sqlalchemy.select(
            sql.reorder_columns(
                cls._exclude(h),
                ["help_id", "menuid", "fullname", "title", "description",
                 "menu_help", "spec_name", "spec_description", "spec_help",
                 "page_id", "parent", "ord", "content", "position",
                 "position_nsub", "changed", "removed"],
            ),
            from_obj=[h.join(u, sql.gR("h.menuid = u.menuid"))],
        )

    def select_2():
        h = sql.t.EvPytisHelp.alias("h")
        return sqlalchemy.select(
            sql.reorder_columns(
                cls._exclude(h),
                ["help_id", "menuid", "fullname", "title", "description",
                 "menu_help", "spec_name", "spec_description", "spec_help",
                 "page_id", "parent", "ord", "content", "position",
                 "position_nsub", "changed", "removed"],
            ),
            from_obj=[h],
            whereclause="h.menuid is null",
        )

    return sqlalchemy.union(select_1(), select_2())

def top_10_fields_by_prob():
    a = LocalizationTile
    b = FieldTile
    a_lo = a.nested_lo.label('a_lo')
    a_hi = a.nested_hi.label('a_hi')
    b_lo = b.nested_lo.label('b_lo')
    b_hi = b.nested_hi.label('b_hi')

    query1 = db.session.query(
        a_lo, a_hi, b_lo, b_hi,
        FieldTile.field_id.label('field_id'),
        LocalizationTile.localization_id.label('localization_id'),
        LocalizationTile.probdensity.label('probdensity'))

    query2 = union(
        query1.join(b, a_lo.between(b_lo, b_hi)),
        query1.join(b, b_lo.between(a_lo, a_hi)),
    ).cte()

    lo = func.greatest(query2.c.a_lo, query2.c.b_lo)
    hi = func.least(query2.c.a_hi, query2.c.b_hi)
    area = (hi - lo + 1) * healpix.PIXEL_AREA
    prob = func.sum(query2.c.probdensity * area).label('probability')

    query = db.session.query(
        query2.c.localization_id,
        query2.c.field_id,
        prob).group_by(
            query2.c.localization_id,
            query2.c.field_id).order_by(prob.desc()).limit(10)

    return query.all()

def get_unclassified(self, all_snapshots, on_time_grads, late_grads,
                     dropouts, unconfirmed_high_gpa, unconfirmed_low_gpa):
    student_lookup = all_snapshots.c.student_lookup
    grade = all_snapshots.c.grade

    on_time_lookups = sql.select([on_time_grads.c.student_lookup])
    late_lookups = sql.select([late_grads.c.student_lookup])
    dropout_lookups = sql.select([dropouts.c.student_lookup])
    unconfirmed_high_lookups = sql.select(
        [unconfirmed_high_gpa.c.student_lookup])
    unconfirmed_low_lookups = sql.select(
        [unconfirmed_low_gpa.c.student_lookup])

    u = sql.union(on_time_lookups, late_lookups, dropout_lookups,
                  unconfirmed_high_lookups, unconfirmed_low_lookups)

    return \
        sql.select([
            sql.distinct(student_lookup).label('student_lookup'),
            sql.literal(original_labels_config.unclassified).label('label')
        ]).\
        where(
            db_expr.and_(
                student_lookup.notin_(u),
                grade >= 12
            )
        )

def get_supplier_contacts_union(self):
    authorised_representative = select([
        Supplier.code,
        Supplier.data['email'].astext.label('email_address')
    ])
    business_contact = select([
        Supplier.code,
        Supplier.data['contact_email'].astext.label('email_address')
    ])
    user_email_addresses = (select([User.supplier_code.label('code'),
                                    User.email_address])
                            .where(User.active))
    return union(authorised_representative, business_contact,
                 user_email_addresses).alias('email_addresses')

def test_select_from_plain_union(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2)
    s2 = select([table]).where(table.c.id == 3)

    u1 = union(s1, s2).alias().select()
    self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

def test_distinct_selectable_in_unions(faux_conn):
    table = some_table(faux_conn)
    s1 = select([table]).where(table.c.id == 2).distinct()
    s2 = select([table]).where(table.c.id == 3).distinct()

    u1 = union(s1, s2).limit(2)
    assert_result(faux_conn, u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

def fetch_submissions(hdlr_name=None):
    """
    :param hdlr_name: Name of the handler that requests submissions.
    :return: Return submissions with 'pid' greater than the milestones added
        by the handler with 'handler_name' and sorted by 'pid', which is
        equivalent to sorted by 'submit_time'. If hdlr_name is not specified
        or there are not any milestones under the name of a handler, all
        submissions are returned. An empty list is returned if there are no
        available submissions.
    :rtype: [Submission]
    :caller: Handler
    """
    mlst = select([t_milestone.c.submission_pid, t_milestone.c.handler_name]) \
        .where(t_milestone.c.handler_name == hdlr_name) \
        .order_by(t_milestone.c.submission_pid.desc()).limit(1)
    mlst = union(select([mlst]), select([None, None]).where(~exists(mlst)))
    s = select([t.c.oj, t.c.problem_id, t.c.problem_title, t.c.problem_url,
                t.c.submit_time, t.c.timezone, t.c.pid]) \
        .where((mlst.c.submission_pid == None) |
               (t.c.pid > mlst.c.submission_pid) &
               (mlst.c.handler_name == hdlr_name)) \
        .order_by(t.c.pid)
    with engine.connect() as conn:
        return [Submission(*d) for d in conn.execute(s)]

def test_compare_col_identity(self):
    stmt1 = (
        select([table_a.c.a, table_b.c.b])
        .where(table_a.c.a == table_b.c.b)
        .alias()
    )
    stmt1_c = (
        select([table_a.c.a, table_b.c.b])
        .where(table_a.c.a == table_b.c.b)
        .alias()
    )

    stmt2 = union(select([table_a]), select([table_b]))

    equivalents = {table_a.c.a: [table_b.c.a]}

    is_false(
        stmt1.compare(stmt2, use_proxies=True, equivalents=equivalents)
    )

    is_true(
        stmt1.compare(stmt1_c, use_proxies=True, equivalents=equivalents)
    )
    is_true(
        (table_a.c.a == table_b.c.b).compare(
            stmt1.c.a == stmt1.c.b,
            use_proxies=True,
            equivalents=equivalents,
        )
    )

def moveNode(self, id_, moveTo):
    # update t_data
    upd = update(
        self.t_data).where(self.t_data.c.node == 5).values(parent=3)
    self.connection.execute(upd)

    # update t_path
    subtree = select([self.t_path.c.node
                      ]).where(self.t_path.c.ancestor == 5)
    path = select([self.t_path.c.ancestor]).where(self.t_path.c.node == 5)

    # deletion
    cond = and_(
        or_(self.t_path.c.node == 5, self.t_path.c.node.in_(subtree)),
        self.t_path.c.ancestor.in_(path))
    delet = self.t_path.delete().where(cond)
    self.connection.execute(delet)

    # insertion
    subtree = select([self.t_path.c.node
                      ]).where(self.t_path.c.ancestor == 5)
    path = select([self.t_path.c.ancestor]).where(self.t_path.c.node == 5)
    path_y = select([self.t_path.c.ancestor
                     ]).where(self.t_path.c.node == 3)

    subset = select([column('node'), column('ancestor')]).select_from(
        union(
            select([
                literal_column('1').label('sortir'),
                literal_column('5').label('node'),
                literal_column('3').label('ancestor')
            ]),
            select([2, 5, path_y]),
            select([3, subtree, 3]),
            select([4, subtree, path_y])).order_by('sortir'))

    addToPath = self.t_path.insert().from_select(['node', 'ancestor'], subset)
    self.connection.execute(addToPath)

def _do_get_provider_count_and_objs(self, **kw):
    '''Custom getter function respecting lesson

    Returns the result count from the database and a query object
    '''
    # TODO: Code duplication with CRC?!
    qry = Submission.query

    # Process lesson filter
    if self.lesson:
        q1 = (qry.join(Submission.user).join(lesson_members).join(Lesson)
              .filter(Lesson.id == self.lesson.id).order_by(None))
        q2 = (qry.join(Submission.user).join(team_members).join(Team)
              .filter(Team.lesson_id == self.lesson.id).order_by(None))
        qry = qry.select_from(union(q1, q2)).order_by(Submission.id)

    filters = kw.pop('filters', dict())
    for filter in filters:
        if isinstance(filters[filter], (list, tuple, set)):
            qry = qry.filter(getattr(Submission, filter).in_(filters[filter]))
        else:
            qry = qry.filter(getattr(Submission, filter) == filters[filter])

    # Process filters from url
    kwfilters = kw
    exc = False
    try:
        kwfilters = self.__provider__._modify_params_for_dates(self.__model__, kwfilters)
    except ValueError as e:
        log.info('Could not parse date filters', exc_info=True)
        flash('Could not parse date filters: "%s".' % e.message, 'error')
        exc = True

    try:
        kwfilters = self.__provider__._modify_params_for_relationships(self.__model__, kwfilters)
    except (ValueError, AttributeError) as e:
        log.info('Could not parse relationship filters', exc_info=True)
        flash('Could not parse relationship filters: "%s". '
              'You can only filter by the IDs of relationships, not by their names.'
              % e.message, 'error')
        exc = True
    if exc:
        # Since non-parsed kwfilters are bad, we just have to ignore them now
        kwfilters = {}

    for field_name, value in kwfilters.iteritems():
        field = getattr(self.__model__, field_name)
        try:
            if self.__provider__.is_relation(self.__model__, field_name) and isinstance(value, list):  # pragma: no cover
                value = value[0]
                qry = qry.filter(field.contains(value))
            else:
                qry = qry.filter(field == value)
        except:
            log.warn('Could not create filter on query', exc_info=True)

    # Get total count
    count = qry.count()

    return count, qry

def test_distinct_selectable_in_unions(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2).distinct()
    s2 = select([table]).where(table.c.id == 3).distinct()

    u1 = union(s1, s2).limit(2)
    self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

def test_compare_col_identity(self):
    stmt1 = (
        select([table_a.c.a, table_b.c.b])
        .where(table_a.c.a == table_b.c.b)
        .alias()
    )
    stmt1_c = (
        select([table_a.c.a, table_b.c.b])
        .where(table_a.c.a == table_b.c.b)
        .alias()
    )

    stmt2 = union(select([table_a]), select([table_b]))

    stmt3 = select([table_b])

    equivalents = {table_a.c.a: [table_b.c.a]}

    is_false(
        stmt1.compare(stmt2, use_proxies=True, equivalents=equivalents)
    )

    is_true(
        stmt1.compare(stmt1_c, use_proxies=True, equivalents=equivalents)
    )
    is_true(
        (table_a.c.a == table_b.c.b).compare(
            stmt1.c.a == stmt1.c.b,
            use_proxies=True,
            equivalents=equivalents,
        )
    )

def _lookup(self, assignment_id, *args):
    try:
        assignment_id = int(assignment_id)
        # TODO: Use SQLAlchemy magic on model to make queries on assignment easier
        q1 = (Assignment.query.filter(Assignment.id == assignment_id).join(
            Assignment._lti).order_by(None))
        q2 = (Assignment.query.filter(
            Assignment.id == assignment_id).join(Sheet).join(Event).join(
                Event.lti).order_by(None))
        assignment = Assignment.query.select_from(union(q1, q2)).one()
    except ValueError:
        flash('Invalid LTI Assignment id: %s' % assignment_id, 'error')
        abort(400)
    except NoResultFound:
        flash('LTI Assignment %d not found' % assignment_id, 'error')
        abort(404)
    except MultipleResultsFound:  # pragma: no cover
        log.error('Database inconsistency: LTI Assignment %d',
                  assignment_id, exc_info=True)
        flash(
            'An error occurred while accessing LTI Assignment %d'
            % assignment_id, 'error')
        abort(500)
    controller = LTIAssignmentController(assignment)
    return controller, args

def _lookup(self, assignment_id, *args):
    try:
        assignment_id = int(assignment_id)
        # TODO: Use SQLAlchemy magic on model to make queries on assignment easier
        q1 = Assignment.query.filter(Assignment.id == assignment_id).join(Assignment._lti).order_by(None)
        q2 = (
            Assignment.query.filter(Assignment.id == assignment_id)
            .join(Sheet)
            .join(Event)
            .join(Event.lti)
            .order_by(None)
        )
        assignment = Assignment.query.select_from(union(q1, q2)).one()
    except ValueError:
        flash("Invalid LTI Assignment id: %s" % assignment_id, "error")
        abort(400)
    except NoResultFound:
        flash("LTI Assignment %d not found" % assignment_id, "error")
        abort(404)
    except MultipleResultsFound:  # pragma: no cover
        log.error("Database inconsistency: LTI Assignment %d", assignment_id, exc_info=True)
        flash("An error occurred while accessing LTI Assignment %d" % assignment_id, "error")
        abort(500)
    controller = LTIAssignmentController(assignment)
    return controller, args

def list_worksheets(self, owner_id=None):
    '''
    Return a list of row dicts, one per worksheet. These dicts do NOT contain
    ALL worksheet items; this method is meant to make it easy for a user to
    see the currently existing worksheets. Included worksheet items are those
    that define metadata that one will likely want to see in a list view
    (e.g. title).
    '''
    cols_to_select = [cl_worksheet.c.id,
                      cl_worksheet.c.uuid,
                      cl_worksheet.c.name,
                      cl_worksheet.c.owner_id,
                      cl_group_object_permission.c.permission]
    if owner_id is None:
        # query for public worksheets
        stmt = select(cols_to_select).\
            where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
            where(cl_group_object_permission.c.group_uuid == self.public_group_uuid)
    else:
        # query for worksheets owned by owner_id
        cols1 = cols_to_select[:4]
        cols1.extend([literal(GROUP_OBJECT_PERMISSION_ALL).label('permission')])
        stmt1 = select(cols1).where(cl_worksheet.c.owner_id == owner_id)
        # query for worksheets visible to owner_id or co-owned by owner_id
        stmt2_groups = select([cl_user_group.c.group_uuid]).\
            where(cl_user_group.c.user_id == owner_id)
        stmt2 = select(cols_to_select).\
            where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
            where(or_(
                cl_group_object_permission.c.group_uuid.in_(stmt2_groups),
                cl_group_object_permission.c.group_uuid == self.public_group_uuid)).\
            where(cl_worksheet.c.owner_id != owner_id)
        stmt = union(stmt1, stmt2)

    with self.engine.begin() as connection:
        rows = connection.execute(stmt).fetchall()
        if not rows:
            return []
        uuids = set(row.uuid for row in rows)
        item_rows = connection.execute(
            cl_worksheet_item.select().
            where(cl_worksheet_item.c.worksheet_uuid.in_(uuids)).
            where(or_(
                cl_worksheet_item.c.type == 'title',
                cl_worksheet_item.c.type == 'description'))
        ).fetchall()

    row_dicts = [dict(row) for row in sorted(rows, key=lambda item: item['id'])]
    uuid_index_map = {}
    for i in range(0, len(row_dicts)):
        row_dict = row_dicts[i]
        row_dict.update({'items': []})
        uuid_index_map[row_dict['uuid']] = i
    for item_row in item_rows:
        idx = uuid_index_map.get(item_row.worksheet_uuid, -1)
        if idx < 0:
            raise IntegrityError('Got item %s without worksheet' % (item_row,))
        row_dicts[idx]['items'].append(dict(item_row))
    return row_dicts

def test_union_limit_offset(self):
    emp, dep, conn = self._emp_d_fixture()
    s1 = select([emp.c.name])
    s2 = select([dep.c.name])
    u1 = union(s1, s2).order_by(emp.c.name).limit(3).offset(2)
    r = self._exec_stmt(conn, u1)
    eq_(r.fetchall(), [('Sales', ), ('ed', ), ('jack', )])

def query(class_):
    return sqlalchemy.union(
        sqlalchemy.select([sql.c.Foo.id, sql.c.Bar.description],
                          from_obj=[sql.t.Foo.join(sql.t.Bar)]),
        sqlalchemy.select([
            sql.c.Foo2.id,
            sqlalchemy.literal_column("'xxx'", sqlalchemy.String)
        ]))

def test_union_label(self):
    s1 = select([func.foo("hoho").label("x")])
    s2 = select([func.foo("Bar").label("y")])
    stmt = union(s1, s2).order_by("x")
    self.assert_compile(
        stmt,
        "SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x",
    )

def members_query(self, qry=None):
    if not qry:
        qry = User.query
    qry = qry.select_from(union(
        qry.join(lesson_members).filter_by(lesson_id=self.id).order_by(None),
        qry.join(team_members).join(Team).filter_by(lesson_id=self.id).order_by(None),
    )).order_by(User.id)
    return qry

def test_union_column(self):
    s1 = select([table1])
    s2 = select([table1])
    stmt = union(s1, s2).order_by("name")
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description FROM "
        "mytable UNION SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable ORDER BY name")

def _list_by_category(self, balance_uid, expenses=True, incomes=True):
    model = request.environ['sqlalchemy.model']
    db = request.environ['sqlalchemy.session']
    try:
        user_uid = h.authenticated_user().uid
    except:
        return {"failure": Messages.permissionDenied()}
    if not Operations(db, model).can_see_balance(user_uid, balance_uid):
        return {"failure": Messages.permissionDenied()}

    now = date.today()
    select_expenses = select(
        [model.ExpenseCategory.c.uid.label('uid'),
         model.ExpenseCategory.c.name.label('name'),
         func.sum(model.BalanceChange.c.amount).label('summary')],
        and_(model.BalanceChange.is_income == False,
             model.ExpenseCategory.uid == model.BalanceChange.expense_category_uid,
             model.BalanceChange.balance_uid == balance_uid,
             model.BalanceChange.occurred_on >= date(now.year, now.month, 1)),
        from_obj=[model.expense_categories_table, model.balance_changes_table],
        group_by=[model.ExpenseCategory.c.uid, model.ExpenseCategory.c.name])
    select_incomes = select(
        [model.IncomeCategory.c.uid.label('uid'),
         model.IncomeCategory.c.name.label('name'),
         func.sum(model.BalanceChange.c.amount).label('summary')],
        and_(model.BalanceChange.is_income == True,
             model.IncomeCategory.uid == model.BalanceChange.income_category_uid,
             model.BalanceChange.balance_uid == balance_uid,
             model.BalanceChange.occurred_on >= date(now.year, now.month, 1)),
        from_obj=[model.income_categories_table, model.balance_changes_table],
        group_by=[model.IncomeCategory.c.uid, model.IncomeCategory.c.name])

    if expenses and incomes:
        query = union(select_expenses, select_incomes)
    else:
        query = expenses and select_expenses or select_incomes

    balance_changes = db.execute(query.order_by('name')).fetchall()
    total = len(balance_changes)

    try:
        page_nr = request.params['page_nr']
    except:
        page_nr = 1
    try:
        items_per_page = int(request.params['items_per_page'])
    except:
        items_per_page = 15

    subset = Page(balance_changes, item_count=total,
                  current_page=page_nr, items_per_page=items_per_page)

    return {
        "totalItems": total,
        "itemsFound": len(subset),
        "items": [{
            "uid": item.uid,
            "name": item.name,
            "summary": h.format_decimal(Decimal(item.summary))} for item in subset]
    }

def test_compound(self):
    t1 = table('t1', column('c1'), column('c2'), column('c3'))
    t2 = table('t2', column('c1'), column('c2'), column('c3'))
    self.assert_compile(union(t1.select(), t2.select()),
                        'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION '
                        'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
    self.assert_compile(except_(t1.select(), t2.select()),
                        'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS '
                        'SELECT t2.c1, t2.c2, t2.c3 FROM t2')

def test_union_heterogeneous_columns(self):
    emp, dep, conn = self._emp_d_fixture()
    s1 = select([emp.c.name])
    s2 = select([dep.c.name])
    u1 = union(s1, s2)
    r = self._exec_stmt(conn, u1)
    eq_(r.fetchall(), [('ed', ), ('wendy', ), ('jack', ),
                       ('Engineering', ), ('Accounting', ), ('Sales', )])

def test_union_all(self):
    e = union_all(select([t1.c.col3]),
                  union(select([t1.c.col3]), select([t1.c.col3])))

    wanted = [("aaa",), ("aaa",), ("bbb",), ("bbb",), ("ccc",), ("ccc",)]
    found1 = self._fetchall_sorted(e.execute())
    eq_(found1, wanted)

    found2 = self._fetchall_sorted(e.alias("foo").select().execute())
    eq_(found2, wanted)

def test_union_ordered_alias(self):
    (s1, s2) = (
        select([t1.c.col3.label("col3"), t1.c.col4.label("col4")],
               t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
        select([t2.c.col3.label("col3"), t2.c.col4.label("col4")],
               t2.c.col2.in_(["t2col2r2", "t2col2r3"])),
    )
    u = union(s1, s2, order_by=["col3", "col4"])

    wanted = [("aaa", "aaa"), ("bbb", "bbb"), ("bbb", "ccc"), ("ccc", "aaa")]
    eq_(u.alias("bar").select().execute().fetchall(), wanted)

def test_union_homogeneous(self):
    emp, dep, conn = self._emp_d_fixture()
    s1 = select([emp.c.name]).where(emp.c.name == "wendy")
    s2 = select([emp.c.name]).where(
        or_(emp.c.name == 'wendy', emp.c.name == 'jack'))
    u1 = union(s1, s2)
    r = self._exec_stmt(conn, u1)
    eq_(r.fetchall(), [('wendy', ), ('jack', )])

def test_limit_render_multiple_times(self, connection):
    table = self.tables.some_table
    stmt = select(table.c.id).order_by(table.c.id).limit(1).scalar_subquery()
    u = sqlalchemy.union(select(stmt), select(stmt)).subquery().select()

    self._assert_result(
        connection,
        u,
        [(1,)],
    )

def __select_all__(cls):
    two_values = sqlalchemy.orm.Query(
        sqlalchemy.union(
            sqlalchemy.select([1]),
            sqlalchemy.select([2]),
        )).subquery()
    return super(Label, cls).__select_all__() \
        .join(two_values, sqlalchemy.literal(True))

def test_limit_offset_aliased_selectable_in_unions(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2).\
        limit(1).order_by(table.c.id).alias().select()
    s2 = select([table]).where(table.c.id == 3).\
        limit(1).order_by(table.c.id).alias().select()

    u1 = union(s1, s2).limit(2)
    self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

def test_union_heterogeneous_types(self):
    emp, dep, conn = self._emp_d_fixture()
    s1 = select([emp.c.name, emp.c.fullname]).where(emp.c.name == 'jack')
    s2 = select([dep.c.dep_id, dep.c.name])
    u1 = union(s1, s2)
    r = self._exec_stmt(conn, u1)
    eq_(r.fetchall(), [('jack', 'Jack Smith'), (1, 'Engineering'),
                       (2, 'Accounting'), (3, 'Sales')])

def test_limit_offset_aliased_selectable_in_unions(faux_conn):
    table = some_table(faux_conn)
    s1 = (select([table]).where(table.c.id == 2).limit(1).order_by(
        table.c.id).alias().select())
    s2 = (select([table]).where(table.c.id == 3).limit(1).order_by(
        table.c.id).alias().select())

    u1 = union(s1, s2).limit(2)
    assert_result(faux_conn, u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

def test_date(session):
    dates = (
        date(2016, 1, 1),
        date(2016, 1, 2),
    )
    selects = tuple(sa.select((MakeADate(d),)) for d in dates)
    data = sa.alias(sa.union(*selects, use_labels=True), 'dates')
    stmt = sa.select((data,))
    result = session.execute(stmt).fetchall()
    assert tuple(chain.from_iterable(result)) == dates

def test_union_column(self):
    s1 = select([table1])
    s2 = select([table1])
    stmt = union(s1, s2).order_by("name")
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description FROM "
        "mytable UNION SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable ORDER BY name",
    )

def test_except_style1(self):
    e = except_(
        union(select([t1.c.col3, t1.c.col4]),
              select([t2.c.col3, t2.c.col4]),
              select([t3.c.col3, t3.c.col4])),
        select([t2.c.col3, t2.c.col4]),
    )

    wanted = [("aaa", "aaa"), ("aaa", "ccc"), ("bbb", "aaa"),
              ("bbb", "bbb"), ("ccc", "bbb"), ("ccc", "ccc")]

    found = self._fetchall_sorted(e.alias().select().execute())
    eq_(found, wanted)

def test_select_from_plain_union(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2)
    s2 = select([table]).where(table.c.id == 3)

    u1 = union(s1, s2).alias().select()
    self._assert_result(
        u1.order_by(u1.c.id),
        [(2, 2, 3), (3, 3, 4)]
    )

def list_worksheets(self, user_id=None):
    '''
    Return a list of row dicts, one per worksheet. These dicts do NOT contain
    ALL worksheet items; this method is meant to make it easy for a user to
    see their existing worksheets.
    '''
    cols_to_select = [cl_worksheet.c.id,
                      cl_worksheet.c.uuid,
                      cl_worksheet.c.name,
                      cl_worksheet.c.owner_id,
                      cl_group_object_permission.c.permission]
    cols1 = cols_to_select[:4]
    cols1.extend([literal(GROUP_OBJECT_PERMISSION_ALL).label('permission')])
    if user_id == self.root_user_id:
        # query all worksheets
        stmt = select(cols1)
    elif user_id is None:
        # query for public worksheets (only used by the webserver when user is not logged in)
        stmt = select(cols_to_select).\
            where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
            where(cl_group_object_permission.c.group_uuid == self.public_group_uuid)
    else:
        # 1) Worksheets owned by owner_id
        stmt1 = select(cols1).where(cl_worksheet.c.owner_id == user_id)
        # 2) Worksheets visible to owner_id or co-owned by owner_id
        stmt2_groups = select([cl_user_group.c.group_uuid]).\
            where(cl_user_group.c.user_id == user_id)
        # List worksheets where one of our groups has permission.
        stmt2 = select(cols_to_select).\
            where(cl_worksheet.c.uuid == cl_group_object_permission.c.object_uuid).\
            where(or_(
                cl_group_object_permission.c.group_uuid.in_(stmt2_groups),
                cl_group_object_permission.c.group_uuid == self.public_group_uuid)).\
            where(cl_worksheet.c.owner_id != user_id)
        # Avoid duplicates
        stmt = union(stmt1, stmt2)

    with self.engine.begin() as connection:
        rows = connection.execute(stmt).fetchall()
        if not rows:
            return []
        # Get permissions of the worksheets
        worksheet_uuids = [row.uuid for row in rows]
        uuid_group_permissions = dict(
            zip(worksheet_uuids, self.batch_get_group_permissions(worksheet_uuids)))

    # Put the permissions into the worksheets
    row_dicts = []
    for row in sorted(rows, key=lambda item: item['id']):
        row = dict(row)
        row['group_permissions'] = uuid_group_permissions[row['uuid']]
        row_dicts.append(row)

    return row_dicts

def test_composite_alias(self):
    ua = intersect(
        select([t2.c.col3, t2.c.col4]),
        union(select([t1.c.col3, t1.c.col4]),
              select([t2.c.col3, t2.c.col4]),
              select([t3.c.col3, t3.c.col4]))
        .alias()
        .select(),
    ).alias()

    wanted = [("aaa", "bbb"), ("bbb", "ccc"), ("ccc", "aaa")]
    found = self._fetchall_sorted(ua.select().execute())
    eq_(found, wanted)

def test_except_style1(self):
    e = except_(union(
        select([t1.c.col3, t1.c.col4]),
        select([t2.c.col3, t2.c.col4]),
        select([t3.c.col3, t3.c.col4]),
    ), select([t2.c.col3, t2.c.col4]))

    wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
              ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]

    found = self._fetchall_sorted(e.alias().select().execute())
    eq_(found, wanted)

def test_limit_offset_aliased_selectable_in_unions(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2).\
        limit(1).order_by(table.c.id).alias().select()
    s2 = select([table]).where(table.c.id == 3).\
        limit(1).order_by(table.c.id).alias().select()

    u1 = union(s1, s2).limit(2)
    self._assert_result(
        u1.order_by(u1.c.id),
        [(2, 2, 3), (3, 3, 4)]
    )

def test_distinct_selectable_in_unions(self):
    table = self.tables.some_table
    s1 = select([table]).where(table.c.id == 2).\
        distinct()
    s2 = select([table]).where(table.c.id == 3).\
        distinct()

    u1 = union(s1, s2).limit(2)
    self._assert_result(
        u1.order_by(u1.c.id),
        [(2, 2, 3), (3, 3, 4)]
    )

def test_b_ab1_union_b_ab2(self):
    j1 = a.join(b1)
    j2 = a.join(b2)

    b_j1 = b.join(j1)
    b_j2 = b.join(j2)

    s = union(
        select([b_j1], use_labels=True),
        select([b_j2], use_labels=True)
    ).select(use_labels=True)

    self._test(s, self._b_ab1_union_c_ab2)