def test_pickle_prefetch_queryset_usable_outside_of_prefetch(self):
    """Wrapping a queryset in Prefetch() must not change the
    fetch-on-pickle behavior of that queryset when used on its own."""
    Group.objects.create(name='foo')
    event_qs = Event.objects.order_by('id')
    # Hand the queryset to Prefetch; this must not evaluate or taint it.
    Group.objects.prefetch_related(
        models.Prefetch('event_set', queryset=event_qs))
    # Pickling still triggers exactly one fetch...
    with self.assertNumQueries(1):
        restored = pickle.loads(pickle.dumps(event_qs))
    # ...and the unpickled queryset is already populated.
    with self.assertNumQueries(0):
        list(restored)
def get_account_for_update(self, account_id):
    """Fetch one account by id, prefetching its related "real" accounts
    with row locks (``select_for_update``) ahead of the next update."""
    locked_accounts = models.Prefetch(
        'accounts',
        queryset=Account.objects.select_for_update(),
    )
    return self.prefetch_related(locked_accounts).get(id=account_id)
def get_with_context(**learning_unit_year_data):
    """Search learning unit years and attach their entity containers and
    components.

    Builds two nested Prefetch objects so each returned learning unit
    carries ``entity_containers_year`` and ``learning_unit_components``
    attributes, then post-processes every result with
    ``_append_latest_entities`` and ``_append_components``.
    """
    # Entity containers limited to requirement/allocation and the two
    # additional-requirement link types; each container also gets its
    # entity versions attached as ``entity_versions``.
    entity_container_prefetch = models.Prefetch(
        'learning_container_year__entitycontaineryear_set',
        queryset=mdl.entity_container_year.search(link_type=[
            entity_types.REQUIREMENT_ENTITY,
            entity_types.ALLOCATION_ENTITY,
            entity_types.ADDITIONAL_REQUIREMENT_ENTITY_1,
            entity_types.ADDITIONAL_REQUIREMENT_ENTITY_2
        ]).prefetch_related(
            models.Prefetch('entity__entityversion_set',
                            to_attr='entity_versions')),
        to_attr='entity_containers_year')
    # Unit components ordered by component type then acronym; each
    # component year gets its entity components (with their containers)
    # attached as ``entity_components_year``.
    learning_component_prefetch = models.Prefetch(
        'learningunitcomponent_set',
        queryset=mdl.learning_unit_component.LearningUnitComponent.objects.all(
        ).order_by(
            'learning_component_year__type',
            'learning_component_year__acronym').select_related(
                'learning_component_year').prefetch_related(
                    models.Prefetch(
                        'learning_component_year__entitycomponentyear_set',
                        queryset=mdl.entity_component_year.EntityComponentYear.
                        objects.all().select_related('entity_container_year'),
                        to_attr='entity_components_year')),
        to_attr='learning_unit_components')
    learning_units = mdl.learning_unit_year.search(**learning_unit_year_data) \
        .select_related('academic_year', 'learning_container_year') \
        .prefetch_related(entity_container_prefetch) \
        .prefetch_related(learning_component_prefetch) \
        .order_by('academic_year__year', 'acronym')
    # Post-process in Python: attach latest entities, then components.
    learning_units = [
        _append_latest_entities(learning_unit)
        for learning_unit in learning_units
    ]
    learning_units = [
        _append_components(learning_unit)
        for learning_unit in learning_units
    ]
    return learning_units
def get_queryset(self):
    """Annotate contacts with note/phone-call counts and prefetch their
    connections plus pending (not yet arrived) visits."""
    pending_visits = models.Prefetch(
        'visit_set',
        queryset=Visit.objects.order_by('scheduled').filter(
            arrived__isnull=True, status='pending'),
        to_attr='pending_visits',
    )
    base = super(ContactManager, self).get_queryset()
    annotated = base.annotate(
        note_count=models.Count('note', distinct=True),
        phonecall_count=models.Count('phonecall', distinct=True),
    )
    return annotated.prefetch_related('connection_set', pending_visits)
def prefetch_current_user_permissions(self, queryset: models.QuerySet):
    """Prefetch permissions for the current user."""
    user = self.request.user
    # Anonymous-user permissions always apply; authenticated users also
    # match on their own row and on any of their groups.
    criteria = models.Q(user__username=settings.ANONYMOUS_USER_NAME)
    if not user.is_anonymous:
        criteria = criteria | models.Q(user=user) | models.Q(
            group__in=user.groups.all())
    permission_qs = self.qs_permission_model.filter(criteria)
    return queryset.prefetch_related(
        models.Prefetch("permission_group__permissions",
                        queryset=permission_qs))
def get_children(self):
    """Return this item's child pages, memoized on the instance."""
    if hasattr(self, '_cached_children'):
        return self._cached_children
    # TODO: only one menu level is supported for now; multi-level menus
    # will need to be handled here in the future.
    if self.parent_id:
        self._cached_children = []
    else:
        linked_pages = models.Prefetch(
            'link_page',
            queryset=ProjectPage.published.select_subclasses())
        self._cached_children = list(
            self.children.all().prefetch_related(linked_pages))
    return self._cached_children
def prefetch_related_icons(self, author: bool = True, deleter: bool = True):
    """Prefetch role icon images for authors and/or deleters, exposing
    them on each related role as the ``icon`` attribute."""
    from .models import Image
    # Nothing requested — return the queryset untouched.
    if not author and not deleter:
        return self
    icon_qs = Image.objects.icons().only('role_id')
    lookups = []
    if author:
        lookups.append(models.Prefetch(
            'author__role__images', queryset=icon_qs.all(), to_attr='icon'))
    if deleter:
        lookups.append(models.Prefetch(
            'deleter__role__images', queryset=icon_qs.all(), to_attr='icon'))
    return self.prefetch_related(*lookups)
def joins_for_csv_output(self):
    """Prefetch everything the CSV export touches: memberships (with
    ballot, election, party and post), image uploaders and identifiers."""
    from popolo.models import Membership
    membership_qs = Membership.objects.select_related(
        "ballot", "ballot__election", "party", "post")
    return self.prefetch_related(
        models.Prefetch("memberships", membership_qs),
        "images__uploading_user",
        "tmp_person_identifiers",
    )
def load_for_pks(cls, pks):
    """Build a many-serializer for the given program pks with their
    indicators prefetched as ``prefetch_indicators`` (minimal fields)."""
    today = timezone.now().date()
    # Clear any default ordering/select/prefetch on Indicator so only the
    # minimal columns are fetched for the prefetch.
    indicator_qs = Indicator.objects.order_by().select_related(
        None).prefetch_related(None).only(
            'pk', 'program_id', 'target_frequency').filter(program_id__in=pks)
    indicator_prefetch = models.Prefetch(
        'indicator_set', queryset=indicator_qs, to_attr='prefetch_indicators')
    programs = Program.rf_aware_objects.only(
        *cls._get_query_fields()).prefetch_related(
            indicator_prefetch).filter(pk__in=pks)
    return cls(programs, many=True, context={'now': today})
def get_with_context(**learning_unit_year_data):
    """Search learning unit years with their entity containers and
    components prefetched, then append latest entities and components."""
    # Each prefetched entity carries its versions as ``entity_versions``.
    entity_versions = models.Prefetch(
        'entity__entityversion_set', to_attr='entity_versions')
    entity_container_prefetch = models.Prefetch(
        'learning_container_year__entitycontaineryear_set',
        queryset=entity_container_year.search(
            link_type=REQUIREMENT_ENTITIES).prefetch_related(entity_versions),
        to_attr='entity_containers_year',
    )
    qs = learning_unit_year.search(**learning_unit_year_data)
    qs = qs.select_related('academic_year', 'learning_container_year')
    qs = qs.prefetch_related(entity_container_prefetch)
    qs = qs.prefetch_related(get_learning_component_prefetch())
    qs = qs.order_by('academic_year__year', 'acronym')
    # Post-process in Python: latest entities first, then components.
    with_entities = [append_latest_entities(luy) for luy in qs]
    return [append_components(luy) for luy in with_entities]
def seal(self):
    """Return a sealed clone of this queryset.

    String prefetch lookups are normalized into explicit ``Prefetch``
    objects (using the related model's default manager), and the model
    iterable class is swapped for the sealed variant.
    """
    clone = self._clone(_sealed=True)
    normalized = []
    for lookup in clone._prefetch_related_lookups:
        if isinstance(lookup, string_types):
            related_model = self.model._meta.get_field(
                lookup).remote_field.model
            lookup = models.Prefetch(
                lookup, related_model._default_manager.all())
        normalized.append(lookup)
    clone._prefetch_related_lookups = tuple(normalized)
    if issubclass(clone._iterable_class, models.query.ModelIterable):
        clone._iterable_class = SealedModelIterable
    return clone
def with_covers(self):
    """Prefetch, per playlist, the playlist tracks whose album has a cover
    attachment, exposed as ``plts_for_cover`` (ordered by index)."""
    albums_with_cover = music_models.Album.objects.select_related(
        "attachment_cover")
    tracks_with_album = music_models.Track.objects.prefetch_related(
        models.Prefetch("album", queryset=albums_with_cover)).only(
            "id", "album_id")
    covered_playlist_tracks = PlaylistTrack.objects.all().exclude(
        track__album__attachment_cover=None).order_by("index").only(
            "id", "playlist_id", "track_id").prefetch_related(
                models.Prefetch("track", queryset=tracks_with_album))
    return self.prefetch_related(
        models.Prefetch("playlist_tracks",
                        queryset=covered_playlist_tracks,
                        to_attr="plts_for_cover"))
def get_queryset(self):
    """Extend the base queryset with tracked models prefetched in
    record/subrecord code order."""
    tracked_model = self.model.tracked_models.rel.related_model
    ordered_tracked = tracked_model.objects.annotate_record_codes().order_by(
        "record_code",
        "subrecord_code",
    )
    qs = super().get_queryset()
    return qs.prefetch_related(
        models.Prefetch("tracked_models", queryset=ordered_tracked))
def __get_subjectpermissiongroups(self):
    """Permission groups for the current subject (cradmin role), with
    their group users prefetched; the custom managable group, if any, is
    excluded."""
    subject = self.request.cradmin_role
    groupuser_prefetch = models.Prefetch(
        'permissiongroup__permissiongroupuser_set',
        queryset=self.__prefetch_permissiongroupuser_queryset())
    queryset = SubjectPermissionGroup.objects.filter(
        subject=subject).select_related('subject').prefetch_related(
            groupuser_prefetch)
    if self.custom_managable_subjectpermissiongroup:
        queryset = queryset.exclude(
            id=self.custom_managable_subjectpermissiongroup.id)
    return queryset
def get_with_context(**learning_unit_year_data):
    """Search FULL learning unit years with their container entities
    (requirement, allocation, two additional) and learning components
    prefetched, then append latest entities and components in Python.
    """
    # Shared base queryset for every entity prefetch below: each entity
    # carries its versions as ``entity_versions``.
    entity_version_prefetch = Entity.objects.all().prefetch_related(
        Prefetch('entityversion_set', to_attr='entity_versions'))
    requirement_entity_prefetch = models.Prefetch(
        'learning_container_year__requirement_entity',
        queryset=entity_version_prefetch)
    allocation_entity_prefetch = models.Prefetch(
        'learning_container_year__allocation_entity',
        queryset=entity_version_prefetch)
    additional_entity_1_prefetch = models.Prefetch(
        'learning_container_year__additional_entity_1',
        queryset=entity_version_prefetch)
    additional_entity_2_prefetch = models.Prefetch(
        'learning_container_year__additional_entity_2',
        queryset=entity_version_prefetch)
    # Components ordered by type/acronym, exposed as ``learning_components``.
    learning_component_prefetch = models.Prefetch(
        'learningcomponentyear_set',
        queryset=LearningComponentYear.objects.all().order_by(
            'type', 'acronym'),
        to_attr='learning_components')
    learning_units = mdl.learning_unit_year.LearningUnitYear.objects.filter(subtype=FULL, **learning_unit_year_data) \
        .select_related('academic_year', 'learning_container_year') \
        .prefetch_related(requirement_entity_prefetch) \
        .prefetch_related(allocation_entity_prefetch) \
        .prefetch_related(additional_entity_1_prefetch) \
        .prefetch_related(additional_entity_2_prefetch) \
        .prefetch_related(learning_component_prefetch) \
        .order_by('academic_year__year', 'acronym')
    learning_units = [
        append_latest_entities(learning_unit)
        for learning_unit in learning_units
    ]
    learning_units = [
        _append_components(learning_unit)
        for learning_unit in learning_units
    ]
    return learning_units
def get_pathway_compounds_for_dataset(dataset):
    """Summarize, per KEGG pathway linked to *dataset*, the identified and
    annotated compounds.

    Returns a list of entries of the form
    ``[pathway, n_identified, n_annotated, coverage_pct,
    [identified_keys, annotated_keys], pathway_compound_number]``.
    """
    # Restrict repository references to KEGG so the prefetched
    # ``repositorycompounds`` attribute only contains KEGG identifiers.
    filterSet = RepositoryCompound.objects.filter(db_name="kegg")
    pathways = Pathway.objects.filter(
        datasourcesuperpathway__data_source__name="kegg",
        datasourcesuperpathway__compoundpathway__compound__peak__dataset=dataset).\
        distinct().prefetch_related(
            models.Prefetch(
                'datasourcesuperpathway_set__compoundpathway_set__compound__repositorycompound_set',
                queryset=filterSet,
                to_attr='repositorycompounds')
        )
    # Secondary ids of compounds already identified in this dataset; used
    # to avoid counting an annotated compound that is also identified.
    identifiedSecondaryIds = Compound.objects.filter(
        identified='True',
        peak__dataset=dataset).distinct().values_list('secondaryId', flat=True)
    logger.info('Number of pathways %d', len(pathways))
    compound_ids = set(
        [c.id for c in Compound.objects.filter(peak__dataset=dataset)])
    pathway_list = []
    for pathway in pathways:
        # repository identifier -> list of peak ids, split by status.
        identified_compounds = defaultdict(list)
        annotated_compounds = defaultdict(list)
        for dssp in pathway.datasourcesuperpathway_set.all():
            cp_compounds = [
                cp.compound for cp in dssp.compoundpathway_set.all()
            ]
            # Keep only compounds that actually belong to this dataset.
            good_compounds = [
                c for c in cp_compounds if c.id in compound_ids
            ]
            for compound in good_compounds:
                for ro in compound.repositorycompounds:
                    # NOTE: ``identified`` is compared as the *string*
                    # 'True'/'False', mirroring how it is stored.
                    if compound.identified == 'True':
                        identified_compounds[ro.identifier].append(
                            compound.peak_id)
                    elif compound.identified == 'False' and compound.secondaryId not in identifiedSecondaryIds:
                        annotated_compounds[ro.identifier].append(
                            compound.peak_id)
        # NOTE(review): assumes compound_number is non-zero (division
        # below) and that at least one datasourcesuperpathway exists
        # (``[0]`` indexing) — confirm upstream invariants.
        info = [
            pathway,
            len(identified_compounds),
            len(annotated_compounds),
            round(((len(identified_compounds) + len(annotated_compounds)) /
                   float(pathway.datasourcesuperpathway_set.all()
                         [0].compound_number)) * 100, 2),
            [identified_compounds.keys(),
             annotated_compounds.keys()],
            pathway.datasourcesuperpathway_set.all()[0].compound_number
        ]
        pathway_list.append(info)
    logger.debug('Returning pathway_list')
    return pathway_list
def load(cls, pk):
    """Build the serializer for one program, prefetching its level tiers,
    levels (with indicators) and unassigned (level-less) indicators."""
    unassigned = models.Prefetch(
        'indicator_set',
        queryset=Indicator.objects.filter(level__isnull=True).only(
            'pk', 'name', 'means_of_verification', 'program', 'sector',
            'number'),
        to_attr='unassigned_indicators')
    program = Program.rf_aware_objects.only(
        'pk', 'name', '_using_results_framework',
        'auto_number_indicators').prefetch_related(
            'level_tiers', 'levels', 'levels__indicator_set',
            unassigned).get(pk=pk)
    return cls(program)
def for_addon(self, addon):
    """Abuse reports for *addon*: matched by guid, direct FK, or any
    listed author — newest first, with user and add-on data preloaded."""
    match = (models.Q(guid=addon.addonguid_guid)
             | models.Q(addon=addon)
             | models.Q(user__in=addon.listed_authors))
    # Should only need translations for addons on abuse reports, so let's
    # prefetch the add-on with them and avoid repeating a ton of
    # potentially duplicate queries with all the useless Addon transforms.
    addon_prefetch = models.Prefetch(
        'addon', queryset=Addon.unfiltered.all().only_translations())
    return self.filter(match).select_related('user').prefetch_related(
        addon_prefetch).order_by('-created')
def get_object_list(self):
    """Search education group years matching the cleaned form data, with
    their management entities (and versions) prefetched, and append
    entity-management info to each result."""
    # Drop form fields the user left empty.
    filters = {}
    for key, value in self.cleaned_data.items():
        if value is not None:
            filters[key] = value
    versions = models.Prefetch('entity__entityversion_set',
                               to_attr='entity_versions')
    managements = models.Prefetch(
        'offeryearentity_set',
        queryset=offer_year_entity.search(
            type=offer_year_entity_type.ENTITY_MANAGEMENT).prefetch_related(
                versions),
        to_attr='offer_year_entities')
    if filters.get('requirement_entity_acronym'):
        # Narrow to the requested management entity (optionally including
        # subordinated entities).
        filters['id'] = _get_filter_entity_management(
            filters['requirement_entity_acronym'],
            filters.get('with_entity_subordinated', False))
    groups = education_group_year.search(**filters).prefetch_related(
        managements)
    return [_append_entity_management(group) for group in groups]
def prefetch_related_primary_username(self):
    """
    Prefetch each user's primary :class:`.UserName` into the
    ``primary_username_objects`` list attribute on every returned
    :class:`.User`.

    Do not read that list directly — use
    :meth:`.User.primary_username_object` or
    :meth:`.User.primary_username` to access the primary username.
    """
    primary_only = UserName.objects.filter(is_primary=True)
    return self.prefetch_related(
        models.Prefetch('username_set',
                        queryset=primary_only,
                        to_attr='primary_username_objects'))
def get_lock_suggestions(self):
    """Unlocked current-election posts that have both a lock suggestion
    and an official document, in random order, excluding posts the
    current user already suggested locking."""
    # TODO optimize this
    lock_prefetch = models.Prefetch(
        "suggestedpostlock_set",
        SuggestedPostLock.objects.select_related("user"),
    )
    membership_prefetch = models.Prefetch(
        "membership_set",
        Membership.objects.select_related(
            "person", "party").prefetch_related("person__other_names"),
    )
    qs = (PostExtraElection.objects.filter(
        election__current=True,
        candidates_locked=False).exclude(suggestedpostlock=None).exclude(
            officialdocument=None).select_related(
                "election", "post").prefetch_related(
                    "officialdocument_set",
                    lock_prefetch,
                    membership_prefetch,
                ).order_by("?"))
    return qs.exclude(suggestedpostlock__user=self.request.user)
def prefetch_syncsystemtag_objects(self):
    """
    Prefetch :class:`.RelatedStudentSyncSystemTag` objects into the
    ``syncsystemtag_objects`` attribute — a ``list`` ordered by ``tag``
    in ascending order.
    """
    tags_by_name = RelatedStudentSyncSystemTag.objects.order_by('tag')
    return self.prefetch_related(
        models.Prefetch('relatedstudentsyncsystemtag_set',
                        queryset=tags_by_name,
                        to_attr='syncsystemtag_objects'))
def game_two(self, user):
    '''
    Returns a tuple containing a queryset of Transcript objects and a
    list of phrase PKs to annotate.

    Phrases are excluded under the following circumstances:
    - User upvoted the original phrase
    - User has submitted a correction for this phrase
    - User has upvoted a submitted correction
    '''
    # Phrases this user must not see again in game 2.
    ineligible_phrases = [
        interaction.transcript_phrase.pk
        for interaction in TranscriptPhraseInteraction.objects.filter(
            user=user, preclude_from_game=2
        )
    ]
    eligible_phrases = TranscriptPhrase.objects.filter(
        current_game=2, active=True
    ).exclude(
        pk__in=ineligible_phrases
    ).only(
        'current_game', 'transcript', 'pk'
    ).prefetch_related(
        models.Prefetch(
            'transcript', queryset=self.only('pk')
        )
    )
    # Rank transcripts by how many eligible phrases they contain, then
    # take just enough transcripts to cover at least 20 phrases.
    counter = Counter(
        phrase.transcript for phrase in eligible_phrases
    ).most_common()
    total = 0
    transcripts = []
    for transcript, count in counter:
        transcripts.append(transcript)
        total += count
        if total >= 20:
            break
    # Cap the round at 20 phrase pks from the selected transcripts.
    game_two_ready_phrases = [
        phrase.pk
        for phrase in eligible_phrases.filter(transcript__in=transcripts)
    ][:20]
    transcripts_to_return = self.defer('transcript_data_blob').filter(
        phrases__in=game_two_ready_phrases).distinct()
    return (transcripts_to_return, game_two_ready_phrases)
def prefetch_recent_content_changes(self):
    """
    Prefetch recent content changes into ``recent_content_changes``.

    This is used in commit pending, where we need this for all pending
    units.
    """
    content_changes = Change.objects.content().order().prefetch_related(
        "author")
    return self.prefetch_related(
        models.Prefetch("change_set",
                        queryset=content_changes,
                        to_attr="recent_content_changes"))
def _make_examiner_map(self):
    """
    Create a map of :class:`devilry.apps.core.models.Examiner` objects
    keyed by the related examiner's user id.

    Returns:
        dict: Map of examiners.
    """
    group = AssignmentGroup.objects.prefetch_related(
        models.Prefetch('examiners',
                        queryset=_get_examinerqueryset())).get(
                            id=self.group.id)
    return {
        examiner.relatedexaminer.user_id: examiner
        for examiner in group.examiners.all()
    }
def get_reverse_related_page_extensions(self, model_name, language=None, include_descendants=False):
    """
    Return a query to get the page extensions of a given model type related
    to the current page extension instance.

    For example: for an organization, it will return all courses that are
    pointing to this organization via an OrganizationPlugin in any
    placeholder of the course page.
    """
    is_draft = self.extended_object.publisher_is_draft
    # pylint: disable=no-member
    # Relations are always resolved from the draft side of the extension.
    page_extension = self if is_draft else self.draft_extension
    page = page_extension.extended_object
    current_language = language or translation.get_language()
    language_clause = get_plugin_language_fallback_clause(
        current_language, is_draft)
    self_name = self._meta.model.__name__.lower()
    if include_descendants is True:
        # Match plugins placed on this page or on any descendant page
        # (same subtree path prefix, depth greater or equal).
        bfs = ("extended_object__placeholders__cmsplugin__"
               f"courses_{self_name:s}pluginmodel__page__node")
        selector = {
            f"{bfs:s}__path__startswith": page.node.path,
            f"{bfs:s}__depth__gte": page.node.depth,
        }
    else:
        bfs = ("extended_object__placeholders__cmsplugin__courses_"
               f"{self_name:s}pluginmodel__page")
        selector = {bfs: page}
    # For a public page, we must filter out page extensions that are not
    # published in any language
    if is_draft is False:
        selector["extended_object__title_set__published"] = True
    page_extension_model = apps.get_model(app_label="courses",
                                          model_name=model_name)
    # pylint: disable=no-member
    return (page_extension_model.objects.filter(
        language_clause,
        extended_object__publisher_is_draft=is_draft,
        **selector,
    ).select_related("extended_object").prefetch_related(
        models.Prefetch(
            "extended_object__title_set",
            to_attr="prefetched_titles",
            queryset=Title.objects.filter(language=current_language),
        )).distinct().order_by("extended_object__node__path"))
def get_queryset(self):
    """Representative detail queryset with published positions —
    optionally filtered by the selected theme — and their themes and
    scores prefetched."""
    qs = super(RepresentativeDetailPositions, self).get_queryset()
    published = Position.objects.filter(published=True)
    theme = self.get_selected_theme()
    if theme:
        published = published.filter(themes__slug=theme)
    return qs.prefetch_related(
        models.Prefetch('positions',
                        queryset=published.order_by('-datetime', 'pk')),
        'positions__themes',
        'positions__position_score')
def get_list_view(cls, language='de'):
    """
    Function: Get List View

    Args:
        language: translation language code to filter on; defaults to
            ``'de'`` to preserve the historical behaviour, so existing
            callers are unaffected.

    Returns:
        [events]: Array of all Events that have a translation in
        ``language``, with those translations (and their creators)
        prefetched.
    """
    # The language code was hard-coded to 'de'; it is now a parameter
    # used consistently for both the prefetch queryset and the filter.
    event_translations = EventTranslation.objects.filter(
        language=language).select_related('creator')
    events = cls.objects.all().prefetch_related(
        models.Prefetch('event_translations',
                        queryset=event_translations)).filter(
                            event_translations__language=language)
    return events
def get_queryset(self):
    """Annotate the base queryset with expense/revenue totals and counts
    across all credebtors, and prefetch the underlying rows into the
    ``all_expenses`` / ``all_revenues`` attributes.

    NOTE(review): combining several aggregates over different multi-valued
    joins can inflate results; ``distinct=True`` on ``Sum`` deduplicates
    *values*, not rows, so two expenses with the same amount are counted
    once — confirm the totals are intended to behave this way.
    """
    qs = super().get_queryset()
    qs = qs.prefetch_related(
        models.Prefetch("credebtors__expenses", to_attr="all_expenses"),
        models.Prefetch("credebtors__revenues", to_attr="all_revenues"),
    )
    qs = qs.annotate(
        all_expenses_amount=models.Sum(
            "credebtors__expenses__amount", distinct=True
        )
    )
    qs = qs.annotate(
        all_expenses_count=models.Count("credebtors__expenses", distinct=True)
    )
    qs = qs.annotate(
        all_revenues_amount=models.Sum(
            "credebtors__revenues__amount", distinct=True
        )
    )
    qs = qs.annotate(
        all_revenues_count=models.Count("credebtors__revenues", distinct=True)
    )
    return qs
def _get_prefetch_related(cls, feature_type: FeatureType, output_crs: CRS) -> list[models.Prefetch]:
    """Summarize which fields read data from relations.

    This combines the input from flattened and complex fields, in the
    unlikely case both variations are used in the same feature.
    """
    prefetches = []
    for orm_relation in feature_type.orm_relations:
        related_qs = cls.get_prefetch_queryset(
            feature_type, orm_relation, output_crs)
        prefetches.append(
            models.Prefetch(orm_relation.orm_path, queryset=related_qs))
    return prefetches