def go():
    # Refresh the `users` list in place (slice assignment keeps existing
    # references valid).  Wildcard joinedload("*") applies joined eager
    # loading at every level, with explicit per-relationship overrides.
    # NOTE(review): `sess`, `users`, `User`, `Order` and `self` come from
    # the enclosing test scope; `self.classes.User.id` is presumably the
    # same class as `User` -- confirm against the fixture.
    users[:] = (sess.query(User).options(joinedload("*")).options(
        defaultload(User.addresses).joinedload("*")).options(
        defaultload(User.orders).joinedload("*")).options(
        defaultload(User.orders).defaultload(
            Order.items).joinedload("*")).order_by(
        self.classes.User.id).all())
def serialize_timetable(self, event, days=None, hide_weekends=False,
                        hide_empty_days=False):
    """Serialize *event*'s timetable into a dict keyed by day ('YYYYMMDD').

    :param days: optional whitelist of days to include
    :param hide_weekends: skip Saturdays and Sundays
    :param hide_empty_days: drop days that end up with no visible entries
    """
    timetable = {}
    # Pre-create one bucket per day; entries outside these days are skipped.
    for day in iterdays(event.start_dt_local, event.end_dt_local,
                        skip_weekends=hide_weekends, day_whitelist=days):
        date_str = day.strftime('%Y%m%d')
        timetable[date_str] = {}
    # NOTE(review): the subqueryload() return values are discarded here;
    # this relies on legacy SQLAlchemy unbound-option in-place chaining --
    # verify the sub-options still attach on newer SQLAlchemy versions.
    contributions_strategy = defaultload('contribution')
    contributions_strategy.subqueryload('person_links')
    contributions_strategy.subqueryload('references')
    query_options = (contributions_strategy,
                     defaultload('session_block').subqueryload('person_links'))
    # Session blocks are ordered first so parent entries exist before any
    # child entries are attached to them.
    query = (TimetableEntry.query.with_parent(event)
             .options(*query_options)
             .order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
    for entry in query:
        day = entry.start_dt.astimezone(event.tzinfo).date()
        date_str = day.strftime('%Y%m%d')
        if date_str not in timetable:
            continue
        if not entry.can_view(session.user):
            continue
        data = self.serialize_timetable_entry(entry, load_children=False)
        key = self._get_entry_key(entry)
        if entry.parent:
            # Nested entry: attach under its parent session block.
            parent_code = 's{}'.format(entry.parent_id)
            timetable[date_str][parent_code]['entries'][key] = data
        else:
            timetable[date_str][key] = data
    if hide_empty_days:
        timetable = self._filter_empty_days(timetable)
    return timetable
def test_unbound_options(self):
    """Exercise cache-key generation for a broad matrix of unbound loader
    options (joinedload/subqueryload/selectinload/defer/defaultload,
    string and attribute forms, aliased and .and_() variants)."""
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item")
    self._run_cache_key_fixture(
        lambda: (
            joinedload(User.addresses),
            joinedload(User.addresses.of_type(aliased(Address))),
            joinedload("addresses"),
            joinedload(User.orders),
            joinedload(User.orders.and_(Order.id != 5)),
            joinedload(User.orders.and_(Order.id == 5)),
            joinedload(User.orders.and_(Order.description != "somename")),
            joinedload(User.orders).selectinload("items"),
            joinedload(User.orders).selectinload(Order.items),
            defer(User.id),
            defer("id"),
            defer("*"),
            defer(Address.id),
            subqueryload(User.orders),
            selectinload(User.orders),
            joinedload(User.addresses).defer(Address.id),
            joinedload(aliased(User).addresses).defer(Address.id),
            joinedload(User.addresses).defer("id"),
            joinedload(User.orders).joinedload(Order.items),
            joinedload(User.orders).subqueryload(Order.items),
            subqueryload(User.orders).subqueryload(Order.items),
            subqueryload(User.orders).subqueryload(Order.items).defer(
                Item.description),
            defaultload(User.orders).defaultload(Order.items),
            defaultload(User.orders),
        ),
        compare_values=True,
    )
def go():
    # Refresh the `users` list in place via slice assignment.  Wildcard
    # subqueryload("*") applies subquery eager loading at each level of
    # the User -> addresses/orders -> items graph.
    # NOTE(review): `sess`, `users`, `User` and `Order` come from the
    # enclosing test scope.
    users[:] = (sess.query(User).options(subqueryload("*")).options(
        defaultload(User.addresses).subqueryload("*")).options(
        defaultload(User.orders).subqueryload("*")).options(
        defaultload(User.orders).defaultload(
            Order.items).subqueryload("*")).order_by(
        User.id).all())
def undefer_qtys(entity):
    """Return loader options to undefer the ``qtys`` column group on a
    related entity.

    ``undefer_group()`` works on relationship-bound default loaders only
    from SQLAlchemy 1.1.14 on; older versions fall back to undeferring
    each column of the group individually.

    :param entity: relationship attribute to traverse with defaultload()
    :returns: a SQLAlchemy loader option
    """
    # Parse the version into an int tuple: a plain string comparison is
    # wrong (e.g. "1.1.2" > "1.1.14" lexicographically even though the
    # release is older, and "1.10" would compare incorrectly too).
    version = tuple(
        int(part) for part in sqlalchemy.__version__.split(".")[:3]
        if part.isdigit()
    )
    if version < (1, 1, 14):
        return defaultload(entity)\
            .undefer("used")\
            .undefer("sold")\
            .undefer("remaining")
    return defaultload(entity).undefer_group("qtys")
def get_all_notifications() -> t.Union[ExtendedJSONResponse[NotificationsJSON],
                                       JSONResponse[HasUnreadNotifcationJSON], ]:
    """Get all notifications for the current user.

    .. :quickref: Notification; Get all notifications.

    :query boolean has_unread: If considered true a short digest will be
        send, i.e. a single object with one key ``has_unread`` with a boolean
        value. Please use this if you simply want to check if there are
        unread notifications.

    :returns: Either a :class:`.NotificationsJSON` or a
        `HasUnreadNotifcationJSON` based on the ``has_unread`` parameter.
    """
    # Unread notifications first, newest first within each group; defer the
    # potentially large `last_edit` column and eagerly load the reply ->
    # comment base -> file -> work chain needed for serialization.
    notifications = db.session.query(Notification).join(
        Notification.comment_reply
    ).filter(
        ~models.CommentReply.deleted,
        Notification.receiver == current_user,
    ).order_by(
        Notification.read.asc(),
        Notification.created_at.desc(),
    ).options(
        contains_eager(Notification.comment_reply),
        defaultload(Notification.comment_reply).defer(
            models.CommentReply.last_edit
        ),
        defaultload(
            Notification.comment_reply,
        ).defaultload(
            models.CommentReply.comment_base,
        ).defaultload(
            models.CommentBase.file,
        ).selectinload(
            models.File.work,
        ),
    ).yield_per(_MAX_NOTIFICATION_AMOUNT)

    def can_see(noti: Notification) -> bool:
        # Permission check per notification; rows the user may not see are
        # filtered out rather than erroring.
        return auth.NotificationPermissions(noti).ensure_may_see.as_bool()

    if request_arg_true('has_unread'):
        # Digest mode: short-circuits on the first visible unread row.
        has_unread = any(
            map(can_see, notifications.filter(~Notification.read))
        )
        return JSONResponse.make({'has_unread': has_unread})

    return ExtendedJSONResponse.make(
        NotificationsJSON(
            notifications=[
                n for n in
                itertools.islice(notifications, _MAX_NOTIFICATION_AMOUNT)
                if can_see(n)
            ]
        ),
        use_extended=(models.CommentReply, Notification)
    )
def test_options_selectinquery(self):
    """ selectinquery() + load_only() + options(selectinquery() + load_only())

    Verifies that nested selectinquery() callbacks can both filter and
    attach loader options at two relationship levels, and that only the
    load_only() columns (plus PK/FK) are selected at each level.
    """
    engine, ssn = self.engine, self.ssn

    with QueryLogger(engine) as ql:
        q = ssn.query(models.User).options(
            selectinquery(
                models.User.articles,
                lambda q, **kw: q.filter(models.Article.id > 10
                                         )  # first level filter()
                .options(
                    defaultload(models.User.articles).load_only(
                        models.Article.title)  # first level options()
                    .selectinquery(
                        models.Article.comments,
                        lambda q, **kw: q.filter(models.Comment.uid > 1
                                                 )  # second level filter()
                        .options(
                            defaultload(models.User.articles).defaultload(
                                models.Article.comments).load_only(
                                models.Comment.text
                            )  # second level options()
                        )))))
        res = q.all()

        # Test query
        self.assertQuery(ql[1], 'AND a.id > 10')
        # Column naming differs between SQLAlchemy 1.2 and later versions.
        if SA_12:
            self.assertSelectedColumns(ql[1], 'a.id', 'u_1.id', 'a.title')  # PK, FK, load_only()
        else:
            self.assertSelectedColumns(ql[1], 'a.id', 'a.uid', 'a.title')  # PK, FK, load_only()

        # Test second query
        self.assertQuery(ql[2], 'AND c.uid > 1')
        if SA_12:
            self.assertSelectedColumns(ql[2], 'c.id', 'a_1.id', 'c.text')  # PK, FK, load_only()
        else:
            self.assertSelectedColumns(ql[2], 'c.id', 'c.aid', 'c.text')  # PK, FK, load_only()

        # Test results
        self.assert_users_articles_comments(
            res, 3, 5, 1)  # 3 users, 5 articles, 1 comment
def get_database_metadata(self):
    """Fetch all databases with their tables, table params, storages and
    columns eagerly loaded.

    :returns: ``{'databases': [entities.Database, ...]}``
    :raises sqlalchemy.exc.OperationalError: if the metadata database is
        unreachable (logged before re-raising).
    """
    session_wrapper = sessionmaker(bind=self.__engine)
    session = session_wrapper()
    try:
        databases = session.query(entities.Database).options(
            defaultload(entities.Database.tables).joinedload(
                entities.Table.table_params),
            defaultload(entities.Database.tables).joinedload(
                entities.Table.table_storages).joinedload(
                entities.TableStorage.columns)).all()
        return {'databases': databases}
    except exc.OperationalError:
        logging.error('Unable to connect to the metadata database.')
        raise
    finally:
        # Always release the connection back to the pool; the original
        # code leaked the session on both success and failure paths.
        session.close()
def _test_load_only_propagate(self, use_load):
    """Assert load_only() on a related entity restricts the lazy-load SQL,
    via either a bound Load(User) chain or an unbound defaultload().

    :param use_load: if True use Load(User).defaultload(...), otherwise
        the unbound defaultload(...) form -- both must produce identical SQL.
    """
    User = self.classes.User
    Address = self.classes.Address
    users = self.tables.users
    addresses = self.tables.addresses

    mapper(User, users, properties={
        "addresses": relationship(Address)
    })
    mapper(Address, addresses)

    sess = create_session()
    # One SELECT for the users, then one lazy load per user; the address
    # SELECTs must only include id + email_address (the load_only columns).
    expected = [
        ("SELECT users.id AS users_id, users.name AS users_name "
         "FROM users WHERE users.id IN (:id_1, :id_2)",
         {'id_2': 8, 'id_1': 7}),
        ("SELECT addresses.id AS addresses_id, "
         "addresses.email_address AS addresses_email_address "
         "FROM addresses WHERE :param_1 = addresses.user_id",
         {'param_1': 7}),
        ("SELECT addresses.id AS addresses_id, "
         "addresses.email_address AS addresses_email_address "
         "FROM addresses WHERE :param_1 = addresses.user_id",
         {'param_1': 8}),
    ]

    if use_load:
        opt = Load(User).defaultload(User.addresses).load_only("id", "email_address")
    else:
        opt = defaultload(User.addresses).load_only("id", "email_address")
    q = sess.query(User).options(opt).filter(User.id.in_([7, 8]))

    def go():
        for user in q:
            user.addresses

    self.sql_eq_(go, expected)
def test_load_only_path_specific(self):
    """Assert that load_only() applied per-path (root entity vs each
    joined-eager relationship) restricts the columns of the joined query."""
    User = self.classes.User
    Address = self.classes.Address
    Order = self.classes.Order

    users = self.tables.users
    addresses = self.tables.addresses
    orders = self.tables.orders

    # OrderedDict keeps the join order deterministic for assert_compile.
    mapper(User, users, properties=util.OrderedDict([
        ("addresses", relationship(Address, lazy="joined")),
        ("orders", relationship(Order, lazy="joined"))
    ]))
    mapper(Address, addresses)
    mapper(Order, orders)

    sess = create_session()

    q = sess.query(User).options(
        load_only("name").defaultload("addresses").load_only("id", "email_address"),
        defaultload("orders").load_only("id")
    )

    # hmmmm joinedload seems to be forcing users.id into here...
    self.assert_compile(
        q,
        "SELECT users.id AS users_id, users.name AS users_name, "
        "addresses_1.id AS addresses_1_id, "
        "addresses_1.email_address AS addresses_1_email_address, "
        "orders_1.id AS orders_1_id FROM users "
        "LEFT OUTER JOIN addresses AS addresses_1 "
        "ON users.id = addresses_1.user_id "
        "LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
    )
def get_chat_list(db: Session, user: str, limit: int = 10,
                  offset: int = 0) -> List[ChatRoom]:
    """Return one page of chat rooms that *user* participates in.

    :param db: active database session
    :param user: username used to filter room membership
    :param limit: page size
    :param offset: number of rooms to skip
    """
    rooms_query = (
        db.query(ChatRoom)
        .filter(ChatRoom.users.any(user=user))
        .options(defaultload('users'))
    )
    return rooms_query.offset(offset).limit(limit).all()
def as_relation(self, join_path: Union[Tuple[RelationshipProperty], None] = None):
    """Configure this query to act on a model reached through relationships.

    When querying a related model (e.g. the articles of a `User`), loader
    options such as `lazyload()` cannot be applied directly; they must be
    anchored on the relationship path, i.e.:

        defaultload(models.User.articles).lazyload(models.Article)

    This method records that path so subsequent options are built through
    `defaultload(*join_path)`.

    :param join_path: tuple of relationships leading to this query, or a
        falsy value to reset to the plain `Load(self.model)` root (this
        reset path is what `__copy__()` uses).
    :returns: self, for chaining
    """
    if not join_path:
        # Reset to defaults: no relationship path, options anchor on the
        # model itself.
        self._join_path = ()
        self._as_relation = Load(self.model)
        return self
    self._join_path = join_path
    self._as_relation = defaultload(*self._join_path)
    return self
def test_unsafe_unbound_option_cancels_bake(self):
    """An of_type(aliased(...)) loader option is not safely cacheable, so
    repeated queries must not grow the baked-query LRU beyond one entry."""
    User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")

    class SubDingaling(Dingaling):
        pass

    mapper(SubDingaling, None, inherits=Dingaling)
    lru = Address.dingalings.property._lazy_strategy._bakery(
        lambda q: None
    )._bakery
    l1 = len(lru)
    # Run the same query several times; the unsafe option must prevent
    # baking, so the cache should only gain the one non-query entry.
    for i in range(5):
        sess = Session()
        u1 = (
            sess.query(User)
            .options(
                defaultload(User.addresses).lazyload(
                    Address.dingalings.of_type(aliased(SubDingaling))
                )
            )
            .first()
        )
        for ad in u1.addresses:
            ad.dingalings
    l2 = len(lru)
    eq_(l1, 0)
    eq_(l2, 1)
def test_fetch_results(self):
    """Profile fetching results from a pre-compiled ORM query (100x),
    bypassing Query.all() to measure the execution/loading path only."""
    A, B, C, D, E, F, G = self.classes("A", "B", "C", "D", "E", "F", "G")

    sess = Session(testing.db)

    q = sess.query(A).options(
        joinedload(A.bs).joinedload(B.cs).joinedload(C.ds),
        joinedload(A.es).joinedload(E.fs),
        defaultload(A.es).joinedload(E.gs),
    )

    # Compile once outside the profiled loop; only execution is measured.
    compile_state = q._compile_state()

    from sqlalchemy.orm.context import ORMCompileState

    @profiling.function_call_count()
    def go():
        for i in range(100):
            exec_opts = {}
            bind_arguments = {}
            ORMCompileState.orm_pre_session_exec(
                sess, compile_state.query, exec_opts, bind_arguments)
            r = sess.connection().execute(
                compile_state.statement,
                execution_options=exec_opts,
                bind_arguments=bind_arguments,
            )
            # Re-attach the compile state so the ORM can hydrate rows.
            r.context.compiled.compile_state = compile_state
            obj = ORMCompileState.orm_setup_cursor_result(sess, {}, r)
            list(obj)
            sess.close()

    go()
def _clone_timetable(self, new_event): offset = new_event.start_dt - self.old_event.start_dt # no need to copy the type; it's set automatically based on the object attrs = get_simple_column_attrs(TimetableEntry) - {'type', 'start_dt'} break_strategy = defaultload('break_') break_strategy.joinedload('own_venue') break_strategy.joinedload('own_room').lazyload('*') query = (self.old_event.timetable_entries.options( joinedload('parent').lazyload('*'), break_strategy).order_by( TimetableEntry.parent_id.is_(None).desc())) # iterate over all timetable entries; start with top-level # ones so we can build a mapping that can be used once we # reach nested entries entry_map = {} for old_entry in query: entry = TimetableEntry() entry.start_dt = old_entry.start_dt + offset entry.populate_from_attrs(old_entry, attrs) if old_entry.parent is not None: entry.parent = entry_map[old_entry.parent] if old_entry.session_block is not None: entry.session_block = self._session_block_map[ old_entry.session_block] if old_entry.contribution is not None: entry.contribution = self._contrib_map[old_entry.contribution] if old_entry.break_ is not None: entry.break_ = self._clone_break(old_entry.break_) new_event.timetable_entries.append(entry) entry_map[old_entry] = entry
def test_options(self):
    """ selectinquery() + options(load_only()) + limit

    Verifies loader options inside the selectinquery() callback must be
    anchored on the relationship path, and that LIMIT inside the callback
    restricts the eager-loaded rows.
    """
    engine, ssn = self.engine, self.ssn

    with QueryLogger(engine) as ql:
        q = ssn.query(models.User).options(
            selectinquery(
                models.User.articles,
                # Notice how we still have to apply the options using the relationship!
                lambda q, **kw: q.options(
                    defaultload(models.User.articles).load_only(
                        models.Article.title)).limit(1)))
        res = q.all()

        # Test query
        self.assertQuery(ql[1], 'LIMIT 1')
        # Column naming differs between SQLAlchemy 1.2 and later versions.
        if SA_12:
            self.assertSelectedColumns(ql[1], 'a.id', 'u_1.id', 'a.title')  # PK, FK, load_only()
        else:
            self.assertSelectedColumns(ql[1], 'a.id', 'a.uid', 'a.title')  # PK, FK, load_only()

        # Test results
        self.assert_users_articles_comments(
            res, 3, 1, None
        )  # 3 users, 1 article in total ; just one, because of the limit
def _clone_timetable(self, new_event): offset = new_event.start_dt - self.old_event.start_dt # no need to copy the type; it's set automatically based on the object attrs = get_simple_column_attrs(TimetableEntry) - {'type', 'start_dt'} break_strategy = defaultload('break_') break_strategy.joinedload('own_venue') break_strategy.joinedload('own_room').lazyload('*') query = (self.old_event.timetable_entries .options(joinedload('parent').lazyload('*'), break_strategy) .order_by(TimetableEntry.parent_id.is_(None).desc())) # iterate over all timetable entries; start with top-level # ones so we can build a mapping that can be used once we # reach nested entries entry_map = {} for old_entry in query: entry = TimetableEntry() entry.start_dt = old_entry.start_dt + offset entry.populate_from_attrs(old_entry, attrs) if old_entry.parent is not None: entry.parent = entry_map[old_entry.parent] if old_entry.session_block is not None: entry.session_block = self._session_block_map[old_entry.session_block] if old_entry.contribution is not None: entry.contribution = self._contrib_map[old_entry.contribution] if old_entry.break_ is not None: entry.break_ = self._clone_break(old_entry.break_) new_event.timetable_entries.append(entry) entry_map[old_entry] = entry
def go():
    # Build and compile the same eager-loaded query 100 times; used to
    # profile the cost of the ORM compile path (not execution).
    # NOTE(review): `sess`, `A`..`E` come from the enclosing test scope.
    for i in range(100):
        q = sess.query(A).options(
            joinedload(A.bs).joinedload(B.cs).joinedload(C.ds),
            joinedload(A.es).joinedload(E.fs),
            defaultload(A.es).joinedload(E.gs),
        )
        q._compile_context()
def serialize_timetable(self, event, days=None, hide_weekends=False,
                        strip_empty_days=False):
    """Serialize *event*'s timetable into a dict keyed by day ('YYYYMMDD').

    Times are rendered in the event timezone (management view) or its
    display timezone otherwise.  Session blocks spanning midnight appear
    under both days they touch.
    """
    tzinfo = event.tzinfo if self.management else event.display_tzinfo
    event.preload_all_acl_entries()
    timetable = {}
    # Pre-create one bucket per day; entries outside these days are skipped.
    for day in iterdays(event.start_dt.astimezone(tzinfo),
                        event.end_dt.astimezone(tzinfo),
                        skip_weekends=hide_weekends, day_whitelist=days):
        date_str = day.strftime('%Y%m%d')
        timetable[date_str] = {}
    # NOTE(review): the subqueryload() return values are discarded; this
    # relies on legacy SQLAlchemy unbound-option in-place chaining --
    # verify the sub-options still attach on newer versions.
    contributions_strategy = defaultload('contribution')
    contributions_strategy.subqueryload('person_links')
    contributions_strategy.subqueryload('references')
    query_options = (
        contributions_strategy,
        defaultload('session_block').subqueryload('person_links'))
    # Session blocks first so parent entries exist before their children.
    query = (TimetableEntry.query.with_parent(event).options(
        *query_options).order_by(
        TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
    for entry in query:
        day = entry.start_dt.astimezone(tzinfo).date()
        date_str = day.strftime('%Y%m%d')
        if date_str not in timetable:
            continue
        if not entry.can_view(session.user):
            continue
        data = self.serialize_timetable_entry(entry, load_children=False)
        key = self._get_entry_key(entry)
        if entry.parent:
            parent_code = 's{}'.format(entry.parent_id)
            timetable[date_str][parent_code]['entries'][key] = data
        else:
            if (entry.type == TimetableEntryType.SESSION_BLOCK
                    and entry.start_dt.astimezone(tzinfo).date()
                    != entry.end_dt.astimezone(tzinfo).date()):
                # If a session block lasts into another day we need to add it to that day, too
                timetable[entry.end_dt.astimezone(tzinfo).date().strftime(
                    '%Y%m%d')][key] = data
            timetable[date_str][key] = data
    if strip_empty_days:
        timetable = self._strip_empty_days(timetable)
    return timetable
def _checkParams(self, params):
    """Resolve the survey submission referenced by the URL, eagerly
    loading its survey and each answer's question."""
    RHManageSurveysBase._checkParams(self, params)
    submission_id = request.view_args['submission_id']
    eager_answers = defaultload('answers').joinedload('question')
    eager_survey = joinedload('survey')
    self.submission = (SurveySubmission
                       .find(id=submission_id)
                       .options(eager_answers, eager_survey)
                       .one())
def _checkParams(self, params):
    """Resolve the survey submission referenced by the URL, eagerly
    loading its survey (with sections/children) and answer questions."""
    RHManageSurveysBase._checkParams(self, params)
    submission_id = request.view_args['submission_id']
    eager_survey = joinedload('survey')
    eager_answers = defaultload('answers').joinedload('question')
    eager_sections = joinedload('survey').defaultload('sections').joinedload('children')
    self.submission = (SurveySubmission
                       .find(id=submission_id)
                       .options(eager_answers, eager_survey, eager_sections)
                       .one())
def __init__(self, occurrences, start_dt, end_dt, candidates=None,
             rooms=None, specific_room=None, repeat_frequency=None,
             repeat_interval=None, flexible_days=0, show_blockings=True):
    """Build the room availability bar data for a date range.

    :param occurrences: existing reservation occurrences to display
    :param start_dt: start of the considered period
    :param end_dt: end of the considered period
    :param candidates: candidate occurrences for a new booking, if any
    :param rooms: rooms to consider (mutually exclusive with
        *specific_room*); defaults to all active rooms
    :param specific_room: restrict the view to a single room
    :param flexible_days: number of days of flexibility around candidates
    :param show_blockings: whether to load blocked rooms / blockings
    """
    self.occurrences = occurrences
    self.start_dt = start_dt
    self.end_dt = end_dt
    self.candidates = candidates
    self.rooms = rooms
    self.specific_room = specific_room
    self.repeat_frequency = repeat_frequency
    self.repeat_interval = repeat_interval
    self.flexible_days = flexible_days
    self.show_blockings = show_blockings
    self.conflicts = 0
    self.bars = []

    if self.specific_room and self.rooms:
        raise ValueError('specific_room and rooms are mutually exclusive')

    if self.specific_room:
        self.rooms = [self.specific_room]
    elif self.rooms is None:
        self.rooms = Room.find_all(is_active=True)
    self.rooms = sorted(self.rooms,
                        key=lambda x: natural_sort_key(x.full_name))

    if self.show_blockings:
        # avoid loading user data we don't care about
        # NOTE(review): the noload()/load_only() return values are
        # discarded; relies on legacy SQLAlchemy unbound-option in-place
        # chaining -- verify on newer versions.
        user_strategy = defaultload('blocking').defaultload(
            'created_by_user')
        user_strategy.noload('*')
        user_strategy.load_only('first_name', 'last_name')
        room_ids = [r.id for r in self.rooms]
        filters = {
            'room_ids': room_ids,
            'state': BlockedRoom.State.accepted,
            'start_date': self.start_dt.date(),
            'end_date': self.end_dt.date()
        }
        self.blocked_rooms = BlockedRoom.find_with_filters(
            filters).options(user_strategy)
        self.nonbookable_periods = NonBookablePeriod.find(
            NonBookablePeriod.room_id.in_(room_ids),
            NonBookablePeriod.overlaps(self.start_dt, self.end_dt)).all()
    else:
        self.blocked_rooms = []

    self._produce_bars()
def _process_args(self):
    """Resolve the survey submission referenced by the URL, eagerly
    loading its survey (with sections/children) and answer questions."""
    RHManageSurveysBase._process_args(self)
    submission_id = request.view_args['submission_id']
    eager_survey = joinedload('survey')
    eager_answers = defaultload('answers').joinedload('question')
    eager_sections = joinedload('survey').defaultload('sections').joinedload('children')
    self.submission = (SurveySubmission
                       .find(id=submission_id)
                       .options(eager_answers, eager_survey, eager_sections)
                       .one())
def _process_args(self):
    """Resolve the survey submission referenced by the URL, eagerly
    loading its survey (with sections/children) and answer questions."""
    RHManageSurveysBase._process_args(self)
    submission_id = request.view_args['submission_id']
    eager_survey = joinedload('survey')
    eager_answers = defaultload('answers').joinedload('question')
    eager_sections = (joinedload('survey')
                      .defaultload('sections')
                      .joinedload('children'))
    submission_query = SurveySubmission.query.filter_by(id=submission_id)
    self.submission = submission_query.options(
        eager_answers, eager_survey, eager_sections).one()
def test_unbound_cache_key_undefer_group(self):
    """An unbound defaultload().undefer_group() must generate a cache key
    on the wildcard column token with the group name embedded."""
    User, Address = self.classes('User', 'Address')

    query_path = self._make_path_registry([User, "addresses"])

    opt = defaultload(User.addresses).undefer_group('xyz')
    eq_(opt._generate_cache_key(query_path),
        ((Address, 'column:*', ("undefer_group_xyz", True)), ))
def get_article(self, article_id):
    """Fetch one article by primary key (with NL-entity locations eagerly
    loaded) and return its serialized form."""
    query = self.session.query(Article).options(
        defaultload(Article.nl_entities).subqueryload(NLEntity.location),
    )
    article_entity = query.get(article_id)
    return ArticleSchema().dump(article_entity)
def _get_generation_strategy_sqa(
    gs_id: int, decoder: Decoder, reduced_state: bool = False
) -> SQAGenerationStrategy:
    """Obtains most of the SQLAlchemy generation strategy object from DB.

    With ``reduced_state=True``, large model-state columns of the generator
    runs are deferred/lazy-loaded, except for the last generator run, which
    is re-fetched in full so the strategy can be restored.
    """
    gs_sqa_class = cast(
        Type[SQAGenerationStrategy],
        decoder.config.class_to_sqa_class[GenerationStrategy],
    )
    gr_sqa_class = cast(
        Type[SQAGeneratorRun],
        decoder.config.class_to_sqa_class[GeneratorRun],
    )
    with session_scope() as session:
        query = session.query(gs_sqa_class).filter_by(id=gs_id)
        if reduced_state:
            # Skip the heavyweight generator-run payload columns.
            query = query.options(
                lazyload("generator_runs.parameters"),
                lazyload("generator_runs.parameter_constraints"),
                lazyload("generator_runs.metrics"),
                defaultload(gs_sqa_class.generator_runs).defer("model_kwargs"),
                defaultload(gs_sqa_class.generator_runs).defer("bridge_kwargs"),
                defaultload(gs_sqa_class.generator_runs).defer("model_state_after_gen"),
                defaultload(gs_sqa_class.generator_runs).defer("gen_metadata"),
            )
        gs_sqa = query.one_or_none()
    if gs_sqa is None:
        raise ValueError(f"Generation strategy with ID #{gs_id} not found.")

    # Load full last generator run (including model state), for generation
    # strategy restoration, if loading reduced state.
    if reduced_state and gs_sqa.generator_runs:
        last_generator_run_id = gs_sqa.generator_runs[-1].id
        with session_scope() as session:
            last_gr_sqa = (
                session.query(gr_sqa_class)
                .filter_by(id=last_generator_run_id)
                .one_or_none()
            )
        # Swap last generator run with no state for a generator run with
        # state.
        gs_sqa.generator_runs[len(gs_sqa.generator_runs) - 1] = last_gr_sqa
    return gs_sqa
def get_query_options_to_defer_large_model_cols() -> List[strategy_options.Load]:
    """Build loader options that defer the model-state-related columns of
    generator runs.

    Those columns can be large and are not needed on every generator run
    when loading an experiment or generation strategy in reduced state.
    """
    return [
        defaultload("generator_runs").defer(attr.key)
        for attr in GR_LARGE_MODEL_ATTRS
    ]
class RHDisplayAbstract(RHAbstractBase):
    """Request handler displaying a single abstract."""

    # Eager-load options applied when fetching the abstract: its tracks
    # plus the reviews -> ratings -> question chain.
    _abstract_query_options = (joinedload('reviewed_for_tracks'),
                               defaultload('reviews').joinedload('ratings').joinedload('question'))

    @property
    def view_class(self):
        # Management page when accessed in management mode, display page
        # otherwise.
        return WPManageAbstracts if self.management else WPDisplayAbstracts

    def _process(self):
        return render_abstract_page(self.abstract, view_class=self.view_class,
                                    management=self.management)
def _checkParams(self, params):
    """Resolve the registration referenced by the URL.

    Excludes soft-deleted registrations/forms and eagerly loads the form's
    item tree plus each data row's field data.
    """
    RHManageRegFormBase._checkParams(self, params)
    self.registration = (Registration.find(
        Registration.id == request.view_args['registration_id'],
        ~Registration.is_deleted,
        ~RegistrationForm.is_deleted).join(
        Registration.registration_form).options(
        # Reuse the joined form row (contains_eager) and pull in its
        # items and their children in the same query.
        contains_eager(Registration.registration_form).
        defaultload('form_items').joinedload('children')).options(
        defaultload(
            Registration.data).joinedload('field_data')).one())
def supplement_halo_query(self, halo_query):
    """Return a sqlalchemy query with a supplemental join to allow this
    calculation to run efficiently.

    Outer-joins aliased HaloProperty/HaloLink rows at each link level
    (restricted to the property/link names this calculation retrieves) and
    wires them into the relationship paths via contains_eager, undeferring
    the "data" column group.
    """
    name_targets = self.retrieves_dict_ids()
    halo_alias = tangos.core.halo.Halo
    augmented_query = halo_query
    for i in range(self.n_join_levels()):
        halo_property_alias = aliased(tangos.core.halo_data.HaloProperty)
        halo_link_alias = aliased(tangos.core.halo_data.HaloLink)

        # Relationship paths from the root halo down to level i.
        path_to_properties = [tangos.core.halo.Halo.all_links,
                              tangos.core.halo_data.HaloLink.halo_to] * i \
            + [tangos.core.halo.Halo.all_properties]
        path_to_links = [tangos.core.halo.Halo.all_links,
                         tangos.core.halo_data.HaloLink.halo_to] * i \
            + [tangos.core.halo.Halo.all_links]

        if len(name_targets) > 0:
            property_name_condition = halo_property_alias.name_id.in_(
                name_targets)
            link_name_condition = (
                halo_link_alias.relation_id.in_(name_targets))
        else:
            # We know we're joining to a null list of properties; however
            # simply setting these conditions to False results in an
            # apparently efficient SQL query (boils down to 0==1) which
            # actually takes a very long time to execute if the link or
            # property tables are large. Thus, compare to an impossible
            # value instead.
            property_name_condition = halo_property_alias.name_id == -1
            link_name_condition = halo_link_alias.relation_id == -1

        augmented_query = augmented_query.outerjoin(
            halo_property_alias,
            (halo_alias.id == halo_property_alias.halo_id)
            & property_name_condition).\
            outerjoin(halo_link_alias,
                      (halo_alias.id == halo_link_alias.halo_from_id)
                      & link_name_condition).\
            options(contains_eager(*path_to_properties,
                                   alias=halo_property_alias),
                    contains_eager(*path_to_links, alias=halo_link_alias),
                    defaultload(*path_to_properties).undefer_group("data"))

        if i < self.n_join_levels() - 1:
            # Chain to the next halo level through the link's target halo.
            next_level_halo_alias = aliased(tangos.core.halo.Halo)
            path_to_new_halo = path_to_links + [
                tangos.core.halo_data.HaloLink.halo_to
            ]
            augmented_query = augmented_query.outerjoin(
                next_level_halo_alias,
                (halo_link_alias.halo_to_id == next_level_halo_alias.id)).\
                options(contains_eager(*path_to_new_halo,
                                       alias=next_level_halo_alias))
            halo_alias = next_level_halo_alias
    return augmented_query
def _get_generation_strategy_sqa_reduced_state(
        gs_id: int, decoder: Decoder) -> SQAGenerationStrategy:
    """Obtains most of the SQLAlchemy generation strategy object from DB.

    Defers the heavy model-state columns of all generator runs, then
    re-fetches the last generator run in full so the strategy can be
    restored with its model state.
    """
    gs_sqa_class = cast(
        Type[SQAGenerationStrategy],
        decoder.config.class_to_sqa_class[GenerationStrategy],
    )
    gr_sqa_class = cast(
        Type[SQAGeneratorRun],
        decoder.config.class_to_sqa_class[GeneratorRun],
    )
    gs_sqa = _get_generation_strategy_sqa(
        gs_id=gs_id,
        decoder=decoder,
        query_options=[
            lazyload("generator_runs.parameters"),
            lazyload("generator_runs.parameter_constraints"),
            lazyload("generator_runs.metrics"),
            defaultload(gs_sqa_class.generator_runs).defer("model_kwargs"),
            defaultload(gs_sqa_class.generator_runs).defer("bridge_kwargs"),
            defaultload(
                gs_sqa_class.generator_runs).defer("model_state_after_gen"),
            defaultload(gs_sqa_class.generator_runs).defer("gen_metadata"),
        ],
    )

    # Load full last generator run (including model state), for generation
    # strategy restoration
    if gs_sqa.generator_runs:
        last_generator_run_id = gs_sqa.generator_runs[-1].id
        with session_scope() as session:
            last_gr_sqa = (session.query(gr_sqa_class).filter_by(
                id=last_generator_run_id).one_or_none())
        # Swap last generator run with no state for a generator run with
        # state.
        gs_sqa.generator_runs[len(gs_sqa.generator_runs) - 1] = not_none(last_gr_sqa)
    return gs_sqa
def test_unbound_cache_key_included_safe_w_option(self):
    """A string-based defaultload/joinedload/defer chain must produce one
    cache-key entry per sub-option (strategy + deferred column)."""
    User, Address, Order, Item, SubItem = self.classes(
        'User', 'Address', 'Order', 'Item', 'SubItem')

    opt = defaultload("orders").joinedload(
        "items", innerjoin=True).defer("description")
    query_path = self._make_path_registry([User, "orders"])

    eq_(opt._generate_cache_key(query_path),
        ((Order, 'items', Item, ('lazy', 'joined'), ('innerjoin', True)),
         (Order, 'items', Item, 'description', ('deferred', True),
          ('instrument', True))))
def test_fetch_results(self):
    """Profile fetching results from a pre-compiled ORM query (100x),
    exercising the execution/hydration path only (1.4+ API)."""
    A, B, C, D, E, F, G = self.classes("A", "B", "C", "D", "E", "F", "G")

    sess = Session(testing.db)

    q = sess.query(A).options(
        joinedload(A.bs).joinedload(B.cs).joinedload(C.ds),
        joinedload(A.es).joinedload(E.fs),
        defaultload(A.es).joinedload(E.gs),
    )

    # Compile once outside the profiled loop.
    compile_state = q._compile_state()

    from sqlalchemy.orm.context import ORMCompileState

    @profiling.function_call_count(warmup=1)
    def go():
        for i in range(100):
            # NOTE: this test was broken in
            # 77f1b7d236dba6b1c859bb428ef32d118ec372e6 because we started
            # clearing out the attributes after the first iteration. make
            # sure the attributes are there every time.
            assert compile_state.attributes
            exec_opts = {}
            bind_arguments = {}
            ORMCompileState.orm_pre_session_exec(
                sess,
                compile_state.select_statement,
                {},
                exec_opts,
                bind_arguments,
                is_reentrant_invoke=False,
            )
            r = sess.connection().execute(
                compile_state.statement,
                execution_options=exec_opts,
                bind_arguments=bind_arguments,
            )
            # Re-attach the compile state so the ORM can hydrate rows.
            r.context.compiled.compile_state = compile_state
            obj = ORMCompileState.orm_setup_cursor_result(
                sess,
                compile_state.statement,
                {},
                exec_opts,
                {},
                r,
            )
            list(obj.unique())
            sess.close()

    go()
def _checkParams(self, params):
    """Resolve the registration referenced by the URL.

    Excludes soft-deleted registrations/forms and eagerly loads the form's
    item tree plus each data row's field data.
    """
    RHManageRegFormBase._checkParams(self, params)
    self.registration = (Registration
                         .find(Registration.id == request.view_args['registration_id'],
                               ~Registration.is_deleted,
                               ~RegistrationForm.is_deleted)
                         .join(Registration.registration_form)
                         # Reuse the joined form row and pull in its items
                         # and their children in the same query.
                         .options(contains_eager(Registration.registration_form)
                                  .defaultload('form_items')
                                  .joinedload('children'))
                         .options(defaultload(Registration.data)
                                  .joinedload('field_data'))
                         .one())
def test_unbound_cache_key_undefer_group(self):
    """An unbound defaultload().undefer_group() must generate a cache key
    on the wildcard column token with the group name embedded."""
    User, Address = self.classes('User', 'Address')

    query_path = self._make_path_registry([User, "addresses"])

    opt = defaultload(User.addresses).undefer_group('xyz')
    eq_(
        opt._generate_cache_key(query_path),
        (
            (Address, 'column:*', ("undefer_group_xyz", True)),
        )
    )
def serialize_timetable(self, days=None, hide_weekends=False,
                        strip_empty_days=False):
    """Serialize the event's timetable into a dict keyed by day
    ('YYYYMMDD').

    Times are rendered in the event timezone (management view) or its
    display timezone otherwise.  Session blocks spanning midnight appear
    under both days they touch.
    """
    tzinfo = self.event.tzinfo if self.management else self.event.display_tzinfo
    self.event.preload_all_acl_entries()
    timetable = {}
    # Pre-create one bucket per day; entries outside these days are skipped.
    for day in iterdays(self.event.start_dt.astimezone(tzinfo),
                        self.event.end_dt.astimezone(tzinfo),
                        skip_weekends=hide_weekends, day_whitelist=days):
        date_str = day.strftime('%Y%m%d')
        timetable[date_str] = {}
    # NOTE(review): the subqueryload() return values are discarded; this
    # relies on legacy SQLAlchemy unbound-option in-place chaining --
    # verify the sub-options still attach on newer versions.
    contributions_strategy = defaultload('contribution')
    contributions_strategy.subqueryload('person_links')
    contributions_strategy.subqueryload('references')
    query_options = (contributions_strategy,
                     defaultload('session_block').subqueryload('person_links'))
    # Session blocks first so parent entries exist before their children.
    query = (TimetableEntry.query.with_parent(self.event)
             .options(*query_options)
             .order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
    for entry in query:
        day = entry.start_dt.astimezone(tzinfo).date()
        date_str = day.strftime('%Y%m%d')
        if date_str not in timetable:
            continue
        if not entry.can_view(self.user):
            continue
        data = self.serialize_timetable_entry(entry, load_children=False)
        key = self._get_entry_key(entry)
        if entry.parent:
            parent_code = 's{}'.format(entry.parent_id)
            timetable[date_str][parent_code]['entries'][key] = data
        else:
            if (entry.type == TimetableEntryType.SESSION_BLOCK
                    and entry.start_dt.astimezone(tzinfo).date()
                    != entry.end_dt.astimezone(tzinfo).date()):
                # If a session block lasts into another day we need to add it to that day, too
                timetable[entry.end_dt.astimezone(tzinfo).date().strftime('%Y%m%d')][key] = data
            timetable[date_str][key] = data
    if strip_empty_days:
        timetable = self._strip_empty_days(timetable)
    return timetable
def test_safe_unbound_option_allows_bake(self):
    """A plain attribute-based loader option is safely cacheable, so the
    baked-query LRU grows to exactly two entries regardless of how many
    times the query runs."""
    User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")

    lru = Address.dingalings.property._lazy_strategy._bakery(
        lambda q: None)._bakery
    l1 = len(lru)
    for i in range(5):
        sess = Session()
        u1 = sess.query(User).options(
            defaultload(User.addresses).lazyload(
                Address.dingalings)).first()
        for ad in u1.addresses:
            ad.dingalings
    l2 = len(lru)
    eq_(l1, 0)
    eq_(l2, 2)
def test_unbound_cache_key_included_safe_w_option(self):
    """A string-based defaultload/joinedload/defer chain must produce one
    cache-key entry per sub-option (strategy + deferred column)."""
    User, Address, Order, Item, SubItem = self.classes(
        'User', 'Address', 'Order', 'Item', 'SubItem')

    opt = defaultload("orders").joinedload(
        "items", innerjoin=True).defer("description")
    query_path = self._make_path_registry([User, "orders"])

    eq_(
        opt._generate_cache_key(query_path),
        (
            (Order, 'items', Item, ('lazy', 'joined'), ('innerjoin', True)),
            (Order, 'items', Item, 'description', ('deferred', True),
             ('instrument', True))
        )
    )
def __init__(self, occurrences, start_dt, end_dt, candidates=None,
             rooms=None, specific_room=None, repeat_frequency=None,
             repeat_interval=None, flexible_days=0, show_blockings=True):
    """Build the room availability bar data for a date range.

    :param occurrences: existing reservation occurrences to display
    :param start_dt: start of the considered period
    :param end_dt: end of the considered period
    :param candidates: candidate occurrences for a new booking, if any
    :param rooms: rooms to consider (mutually exclusive with
        *specific_room*); defaults to all active rooms
    :param specific_room: restrict the view to a single room
    :param flexible_days: number of days of flexibility around candidates
    :param show_blockings: whether to load blocked rooms / blockings
    """
    self.occurrences = occurrences
    self.start_dt = start_dt
    self.end_dt = end_dt
    self.candidates = candidates
    self.rooms = rooms
    self.specific_room = specific_room
    self.repeat_frequency = repeat_frequency
    self.repeat_interval = repeat_interval
    self.flexible_days = flexible_days
    self.show_blockings = show_blockings
    self.conflicts = 0
    self.bars = []

    if self.specific_room and self.rooms:
        raise ValueError('specific_room and rooms are mutually exclusive')

    if self.specific_room:
        self.rooms = [self.specific_room]
    elif self.rooms is None:
        self.rooms = Room.find_all(is_active=True)
    self.rooms = sorted(self.rooms, key=lambda x: natural_sort_key(x.full_name))

    if self.show_blockings:
        # avoid loading user data we don't care about
        # NOTE(review): the noload()/load_only() return values are
        # discarded; relies on legacy SQLAlchemy unbound-option in-place
        # chaining -- verify on newer versions.
        user_strategy = defaultload('blocking').defaultload('created_by_user')
        user_strategy.noload('*')
        user_strategy.load_only('first_name', 'last_name')
        room_ids = [r.id for r in self.rooms]
        filters = {
            'room_ids': room_ids,
            'state': BlockedRoom.State.accepted,
            'start_date': self.start_dt.date(),
            'end_date': self.end_dt.date()
        }
        self.blocked_rooms = BlockedRoom.find_with_filters(filters).options(user_strategy)
        self.nonbookable_periods = NonBookablePeriod.find(
            NonBookablePeriod.room_id.in_(room_ids),
            NonBookablePeriod.overlaps(self.start_dt, self.end_dt)
        ).all()
    else:
        self.blocked_rooms = []

    self._produce_bars()
def __init__(self, model, query, _as_relation=None):
    """Initialize a MongoDB-style query wrapper.

    :param model: MongoModel
    :type model: mongosql.MongoModel
    :param query: Query to work with
    :type query: sqlalchemy.orm.Query
    :param _as_relation: Parent relationship. Internal argument used when
        working with deeper relations: is used as initial path for
        defaultload(_as_relation).lazyload(...).
    :type _as_relation: sqlalchemy.orm.relationships.RelationshipProperty
    """
    assert isinstance(model, MongoModel)
    assert isinstance(query, Query)
    self._model = model
    self._query = query
    # Root loader path: either anchored at the parent relationship (nested
    # use) or at the model itself (top-level use).
    if _as_relation:
        self._as_relation = defaultload(_as_relation)
    else:
        self._as_relation = Load(self._model.model)
    self._no_joindefaults = False
def test_fetch_results(self):
    """Profile repeatedly fetching rows through a query that carries
    several eager-load chains.
    """
    A, B, C, D, E, F, G = self.classes('A', 'B', 'C', 'D', 'E', 'F', 'G')
    session = Session()
    query = session.query(A).options(
        joinedload(A.bs).joinedload(B.cs).joinedload(C.ds),
        joinedload(A.es).joinedload(E.fs),
        defaultload(A.es).joinedload(E.gs),
    )
    # Compile once so the loop measures only execution + instance loading.
    compile_ctx = query._compile_context()

    @profiling.function_call_count()
    def run():
        for _ in range(100):
            rows = query._execute_and_instances(compile_ctx)
            list(rows)
        session.close()

    run()
def test_unbound_cache_key_included_safe_w_loadonly_strs(self):
    """load_only() with string column names yields the expected cache key:
    the named columns undeferred, everything else deferred (PKs kept).
    """
    User, Address, Order, Item, SubItem = self.classes(
        'User', 'Address', 'Order', 'Item', 'SubItem')
    path = self._make_path_registry([User, "addresses"])
    loader_opt = defaultload(User.addresses).load_only("id", "email_address")
    expected = (
        (Address, 'id', ('deferred', False), ('instrument', True)),
        (Address, 'email_address', ('deferred', False), ('instrument', True)),
        (Address, 'column:*', ('deferred', True), ('instrument', True),
         ('undefer_pks', True)),
    )
    eq_(loader_opt._generate_cache_key(path), expected)
def test_undefer_group_from_relationship_lazyload(self):
    """undefer_group() applied via defaultload() on a lazy relationship:
    the lazy load of User.orders must fetch all 'primary'-group columns
    in the relationship query itself (no per-attribute loads afterwards).
    """
    users, Order, User, orders = \
        (self.tables.users,
         self.classes.Order,
         self.classes.User,
         self.tables.orders)
    mapper(User, users, properties=dict(
        orders=relationship(Order, order_by=orders.c.id)))
    # All three Order columns are deferred and share the 'primary' group,
    # so undeferring the group should load them together.
    mapper(
        Order, orders, properties=util.OrderedDict([
            ('userident', deferred(orders.c.user_id, group='primary')),
            ('description', deferred(orders.c.description, group='primary')),
            ('opened', deferred(orders.c.isopen, group='primary'))
        ])
    )
    sess = create_session()
    q = sess.query(User).filter(User.id == 7).options(
        defaultload(User.orders).undefer_group('primary')
    )

    def go():
        result = q.all()
        o2 = result[0].orders[1]
        # Accessing the deferred attributes must NOT emit extra SQL.
        eq_(o2.opened, 1)
        eq_(o2.userident, 7)
        eq_(o2.description, 'order 3')
    # Exactly two statements: the User SELECT and one orders SELECT that
    # already includes user_id/description/isopen.
    self.sql_eq_(go, [
        ("SELECT users.id AS users_id, users.name AS users_name "
         "FROM users WHERE users.id = :id_1", {"id_1": 7}),
        ("SELECT orders.user_id AS orders_user_id, orders.description "
         "AS orders_description, orders.isopen AS orders_isopen, "
         "orders.id AS orders_id, orders.address_id AS orders_address_id "
         "FROM orders WHERE :param_1 = orders.user_id ORDER BY orders.id",
         {'param_1': 7})])
def _checkParams(self):
    """Load the (non-deleted) registration form referenced by the URL,
    eagerly fetching its item tree. Raises if no such form exists.
    """
    # Preload each form item's children together with their current data.
    item_strategy = defaultload('form_items').joinedload('children').joinedload('current_data')
    regform_id = request.view_args['reg_form_id']
    self.regform = (RegistrationForm
                    .find(id=regform_id, is_deleted=False)
                    .options(item_strategy)
                    .one())
def _process_args(self):
    """Load the (non-deleted) registration form referenced by the URL,
    eagerly fetching its item tree. Raises if no such form exists.
    """
    # Preload each form item's children together with their current data.
    item_strategy = defaultload('form_items').joinedload('children').joinedload('current_data')
    regform_id = request.view_args['reg_form_id']
    self.regform = (RegistrationForm.query
                    .filter_by(id=regform_id, is_deleted=False)
                    .options(item_strategy)
                    .one())
def _checkParams(self):
    """Load the (non-deleted) registration form referenced by the URL,
    eagerly fetching its item tree. Raises if no such form exists.
    """
    # Preload each form item's children together with their current data.
    item_strategy = defaultload("form_items").joinedload("children").joinedload("current_data")
    regform_id = request.view_args["reg_form_id"]
    self.regform = (
        RegistrationForm.find(id=regform_id, is_deleted=False)
        .options(item_strategy)
        .one()
    )