def qs_with_price(queryset: QuerySet, channel_slug: str) -> QuerySet:
    """Annotate each product with the cheapest variant price in the channel.

    Only variant listings belonging to *channel_slug* and carrying a
    non-null price are considered for the minimum.
    """
    in_channel = Q(variants__channel_listings__channel__slug=str(channel_slug))
    has_price = Q(variants__channel_listings__price_amount__isnull=False)
    return queryset.annotate(
        min_variants_price_amount=Min(
            "variants__channel_listings__price_amount",
            filter=in_channel & has_price,
        )
    )
def qs_with_published(queryset: QuerySet, channel_slug: str) -> QuerySet:
    """Annotate each product with its `is_published` flag for the channel.

    The slug is validated first; the flag comes from the product's channel
    listing (NULL when no listing exists for the channel).
    """
    validate_channel_slug(channel_slug)
    listing_flag = (
        ProductChannelListing.objects
        .filter(product_id=OuterRef("pk"), channel__slug=channel_slug)
        .values_list("is_published")[:1]
    )
    return queryset.annotate(
        is_published=ExpressionWrapper(
            Subquery(listing_flag), output_field=BooleanField()
        )
    )
def qs_with_availability(queryset: QuerySet, channel_slug: str) -> QuerySet:
    """Annotate each collection with its `is_published` flag for the channel.

    Pulled from the collection's channel listing; NULL when the collection
    has no listing in *channel_slug*.
    """
    listing_flag = (
        CollectionChannelListing.objects
        .filter(collection_id=OuterRef("pk"), channel__slug=str(channel_slug))
        .values_list("is_published")[:1]
    )
    return queryset.annotate(
        is_published=ExpressionWrapper(
            Subquery(listing_flag), output_field=BooleanField()
        )
    )
def qs_with_published_at(queryset: QuerySet, channel_slug: str) -> QuerySet:
    """Annotate each product with its `published_at` timestamp for the channel.

    Taken from the product's channel listing; NULL when no listing exists.
    """
    listing_timestamp = (
        ProductChannelListing.objects
        .filter(product_id=OuterRef("pk"), channel__slug=str(channel_slug))
        .values_list("published_at")[:1]
    )
    return queryset.annotate(
        published_at=ExpressionWrapper(
            Subquery(listing_timestamp), output_field=DateTimeField()
        )
    )
def annotate(queryset: QuerySet) -> QuerySet:
    """Attach the external task result payload to rows that have started.

    Rows with no `datetime_started` get NULL (no `When` matches).
    """
    task_result = Subquery(
        TaskResult.objects.filter(task_id=OuterRef('task')).values('result')
    )
    return queryset.annotate(
        payloadExtern=Case(
            When(datetime_started__isnull=False, then=task_result),
            output_field=TextField(),
        ),
    )
def qs_with_publication_date(queryset: QuerySet, channel_slug: str) -> QuerySet:
    """Annotate each collection with its `publication_date` for the channel.

    Taken from the collection's channel listing; NULL when no listing exists.
    """
    listing_date = (
        CollectionChannelListing.objects
        .filter(collection_id=OuterRef("pk"), channel__slug=str(channel_slug))
        .values_list("publication_date")[:1]
    )
    return queryset.annotate(
        publication_date=ExpressionWrapper(
            Subquery(listing_date), output_field=DateField()
        )
    )
def qs_with_collection(queryset: QuerySet, **_kwargs) -> QuerySet:
    """Annotate products with their dense-rank position within a collection.

    Ordering is by the collection-product sort order (NULLs last), with the
    through-row id as a deterministic tiebreaker.
    """
    ranking_order = (
        F("collectionproduct__sort_order").asc(nulls_last=True),
        F("collectionproduct__id"),
    )
    return queryset.annotate(
        sort_order=Window(expression=DenseRank(), order_by=ranking_order)
    )
def graph_content_search(queryset: QuerySet, search_text: str) -> QuerySet:
    """Full-text search over graph content, including translated columns.

    Builds a `SearchVector` from every translation-table variant of the
    search-vector parts plus the plain content attributes, then filters on
    the raw search text.

    Bug fix: the previous version filtered on
    ``search_text.split('<punctuation literal>')``. ``str.split`` treats that
    entire literal as a single separator, so it virtually never split
    anything and always produced a *list* (usually ``[search_text]``),
    which is not a valid full-text query value. Filter on the raw string,
    consistent with the sibling ``graph_content_search`` implementation.
    """
    return queryset.annotate(
        search=SearchVector(
            *(
                part % trans_type
                for trans_type in graph_info_translation_tables
                for part in graph_content_search_vector_parts
            ),
            *content_search_vector_attrs,
        )
    ).filter(search=search_text)
def annotate(queryset: QuerySet) -> QuerySet:
    """Add projected (`costs_max`) and accrued (`costs_so_far`) cost columns.

    Assignment counts are attached first via the manager helper, then
    multiplied by the batch's per-assignment reward.
    """
    queryset = Manager_Batches.annotate_assignments(queryset)
    return queryset.annotate(
        costs_max=F('count_assignments_total') * F('settings_batch__reward'),
        costs_so_far=(
            F('count_assignments_approved') * F('settings_batch__reward')
        ),
    )
def process_math(query: QuerySet, entity: Entity) -> QuerySet:
    """Apply the entity's math mode to the event query.

    Annotates a ``count`` value: distinct users for "dau", or an aggregate
    over a numeric event property for the modes in
    ``MATH_TO_AGGREGATE_FUNCTION``. Returns the query unchanged for any
    other math mode.
    """
    if entity.math == "dau":
        # In daily active users mode count only up to 1 event per user per day
        query = query.annotate(count=Count("person_id", distinct=True))
    elif entity.math in MATH_TO_AGGREGATE_FUNCTION:
        # Run relevant aggregate function on specified event property, casting it to a double
        query = query.annotate(
            count=MATH_TO_AGGREGATE_FUNCTION[entity.math](
                Cast(RawSQL('"posthog_event"."properties"->>%s', (entity.math_property,)), output_field=FloatField(),)
            )
        )
        # Skip over events where the specified property is not set or not a number
        # It may not be ideally clear to the user what events were skipped,
        # but in the absence of typing, this is safe, cheap, and frictionless
        query = query.extra(
            where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
            params=[entity.math_property],
        )
    return query
def qs_with_product_count(queryset: QuerySet) -> QuerySet:
    """Annotate each category with its cumulative product count.

    Uses the MPTT `add_related_count` helper to count products in the
    category subtree; categories with no products get 0 via `Coalesce`.
    """
    counted_tree = Category.tree.add_related_count(
        queryset, Product, "category", "p_c", cumulative=True
    )
    per_category_count = Subquery(
        counted_tree.values("p_c").filter(pk=OuterRef("pk"))[:1]
    )
    return queryset.annotate(
        product_count=Coalesce(
            per_category_count, 0, output_field=IntegerField()
        )
    )
def search_by_description(adverts: QuerySet, search_text: str) -> QuerySet:
    """Full-text search adverts by description, ranked by relevance.

    The literal string "None" is treated the same as an empty search term
    (it presumably arrives that way from a query string — leaves the
    queryset untouched in both cases).
    """
    if not search_text or search_text == "None":
        return adverts
    vector = SearchVector("description")
    query = SearchQuery(search_text)
    return (
        adverts
        .annotate(search=vector, rank=SearchRank(vector, query))
        .filter(search=search_text)
        .order_by("-rank")
    )
def qs_with_payment(queryset: QuerySet) -> QuerySet:
    """Annotate orders with the charge status of their most recent payment.

    "Most recent" means highest payment pk; orders without payments get NULL.
    """
    newest_charge_status = (
        Payment.objects.filter(order_id=OuterRef("pk"))
        .order_by("-pk")
        .values_list("charge_status")[:1]
    )
    return queryset.annotate(
        last_charge_status=ExpressionWrapper(
            Subquery(newest_charge_status), output_field=CharField()
        )
    )
def graph_content_search(queryset: QuerySet, search_text: str) -> QuerySet:
    """Full-text search over graph content, including translated columns.

    The search vector covers every translation-table variant of the vector
    parts plus the plain content attributes.
    """
    translated_columns = [
        part % trans_type
        for trans_type in graph_info_translation_tables
        for part in graph_content_search_vector_parts
    ]
    vector = SearchVector(*translated_columns, *content_search_vector_attrs)
    return queryset.annotate(search=vector).filter(search=search_text)
def annotate_groups_restricted(groups: QuerySet, job):
    """Annotate each group with a boolean `restricted` flag for *job*.

    A group is unrestricted when it has an unrestricted member or no
    members at all; every other group is marked restricted.
    """
    cleared_users = get_unrestricted_users(User.objects.all(), job)
    cleared_groups = groups.filter(
        Q(group_permissions__user__in=cleared_users)
        | Q(group_permissions__user__isnull=True)
    )
    return groups.annotate(
        restricted=Case(
            When(id__in=cleared_groups, then=False),
            default=Value(True),
            output_field=models.BooleanField(),
        )
    )
def annotate_usages_price(self, subscriptions: QuerySet) -> QuerySet:
    """Annotate subscriptions with their data and voice usage prices.

    Each price is computed in a separate annotated queryset (usage sum
    times the model's per-unit price constant) and attached via `Subquery`
    so the two aggregations don't interfere with each other.
    """
    model = subscriptions.model
    data_priced = subscriptions.annotate(
        data_usage_price=Cast(
            Sum('datausagerecord__kilobytes_used') * model.ONE_KILOBYTE_PRICE,
            DecimalField(),
        )
    )
    voice_priced = subscriptions.annotate(
        voice_usage_price=Cast(
            Sum('voiceusagerecord__seconds_used') * model.ONE_SECOND_PRICE,
            DecimalField(),
        )
    )
    return subscriptions.annotate(
        data_usage_price=Subquery(
            data_priced.values('data_usage_price'),
            output_field=DecimalField(),
        ),
        voice_usage_price=Subquery(
            voice_priced.values('voice_usage_price'),
            output_field=DecimalField(),
        ),
    )
def aggregate_by_interval(
    filtered_events: QuerySet,
    team_id: int,
    entity: Entity,
    filter: Filter,
    interval: str,
    params: dict,
    breakdown: Optional[str] = None,
) -> Dict[str, Any]:
    """Bucket events by time interval (and optional breakdown), count them,
    apply the entity's math, and fill empty dates across the filter range.

    Returns whatever ``group_events_to_date`` produces for the aggregates.
    """
    interval_annotation = get_interval_annotation(interval)
    values = [interval]
    if breakdown:
        breakdown_type = params.get("breakdown_type")
        if breakdown_type == "cohort":
            # One annotation per cohort id; group by all of them and
            # relabel the breakdown so downstream code knows the shape.
            cohort_annotations = add_cohort_annotations(
                team_id, json.loads(params.get("breakdown", "[]")))
            values.extend(cohort_annotations.keys())
            filtered_events = filtered_events.annotate(**cohort_annotations)
            breakdown = "cohorts"
        elif breakdown_type == "person":
            # Annotate the person property, then group by its name.
            person_annotations = add_person_properties_annotations(
                team_id, params.get("breakdown", ""))
            filtered_events = filtered_events.annotate(**person_annotations)
            values.append(breakdown)
        else:
            # Plain event-property breakdown: group by the property directly.
            values.append(breakdown)
    # Trailing order_by() clears default ordering so GROUP BY is exactly `values`.
    aggregates = (filtered_events.annotate(**interval_annotation).values(
        *values).annotate(count=Count(1)).order_by())
    if breakdown:
        aggregates = aggregates.order_by("-count")
    aggregates = process_math(aggregates, entity)
    dates_filled = group_events_to_date(
        date_from=filter.date_from,
        date_to=filter.date_to,
        aggregates=aggregates,
        interval=interval,
        breakdown=breakdown,
    )
    return dates_filled
def aggregate_by_interval(
    filtered_events: QuerySet,
    team_id: int,
    entity: Entity,
    filter: Filter,
    breakdown: Optional[str] = None,
) -> Dict[str, Any]:
    """Bucket events by the filter's interval (default "day") and optional
    breakdown, count them, apply the entity's math, and fill empty dates.

    Variant of ``aggregate_by_interval`` that reads interval/breakdown
    settings from the ``Filter`` object instead of a raw params dict.
    """
    interval = filter.interval if filter.interval else "day"
    interval_annotation = get_interval_annotation(interval)
    values = [interval]
    if breakdown:
        if filter.breakdown_type == "cohort":
            # One annotation per cohort id; group by all of them and
            # relabel the breakdown so downstream code knows the shape.
            cohort_annotations = add_cohort_annotations(
                team_id,
                filter.breakdown if filter.breakdown and isinstance(filter.breakdown, list) else [])
            values.extend(cohort_annotations.keys())
            filtered_events = filtered_events.annotate(**cohort_annotations)
            breakdown = "cohorts"
        elif filter.breakdown_type == "person":
            # Annotate the person property, then group by its name.
            person_annotations = add_person_properties_annotations(
                team_id,
                filter.breakdown if filter.breakdown and isinstance(filter.breakdown, str) else "")
            filtered_events = filtered_events.annotate(**person_annotations)
            values.append(breakdown)
        else:
            # Plain event-property breakdown: group by the property directly.
            values.append(breakdown)
    # Trailing order_by() clears default ordering so GROUP BY is exactly `values`.
    aggregates = filtered_events.annotate(**interval_annotation).values(
        *values).annotate(count=Count(1)).order_by()
    if breakdown:
        aggregates = aggregates.order_by("-count")
    aggregates = process_math(aggregates, entity)
    dates_filled = group_events_to_date(
        date_from=filter.date_from,
        date_to=filter.date_to,
        aggregates=aggregates,
        interval=interval,
        breakdown=breakdown,
    )
    return dates_filled
def base_currency_latest_values_filter(queryset: QuerySet, name: str, value: str) -> QuerySet:
    """Return only the most recent values for the given base currency.

    Rows are restricted to ``base_currency == value``, then each row is
    annotated with the newest ``value_date`` for its currency and only
    rows matching that date are kept.
    """
    queryset = queryset.filter(base_currency=value)
    newest_per_currency = queryset.filter(
        base_currency=OuterRef('base_currency')
    ).order_by('-value_date')
    return (
        queryset
        .annotate(
            base_currency_latest=Subquery(
                newest_per_currency.values('value_date')[:1]
            )
        )
        .filter(value_date=models.F('base_currency_latest'))
    )
def get_actions(queryset: QuerySet, params: dict, team_id: int) -> QuerySet:
    """Return the team's actions, optionally restricted to requested ids
    and annotated with event counts; steps are prefetched ordered by id.
    """
    if params.get(TREND_FILTER_TYPE_ACTIONS):
        requested_actions = Filter(
            {"actions": json.loads(params.get("actions", "[]"))}
        ).actions
        queryset = queryset.filter(
            pk__in=[action.id for action in requested_actions]
        )
    if params.get("include_count"):
        queryset = queryset.annotate(count=Count(TREND_FILTER_TYPE_EVENTS))
    queryset = queryset.prefetch_related(
        Prefetch("steps", queryset=ActionStep.objects.order_by("id"))
    )
    return queryset.filter(team_id=team_id).order_by("-id")
def with_subtask_verification_time(cls, query_set: QuerySet) -> QuerySet:
    """Annotate each subtask with its verification time, in seconds.

    Formula: 4 * CONCENT_MESSAGING_TIME + 3 * maximum_download_time
    + 0.5 * (computation_deadline - task_to_compute.timestamp),
    with the timestamps converted to epoch seconds via DATE_PART.
    """
    deadline_epoch = Func(
        Value('epoch'), F('computation_deadline'), function='DATE_PART')
    created_epoch = Func(
        Value('epoch'), F('task_to_compute__timestamp'), function='DATE_PART')
    timeout_seconds = deadline_epoch - created_epoch
    verification_seconds = (
        (4 * settings.CONCENT_MESSAGING_TIME)
        + (3 * F('maximum_download_time'))
        + (0.5 * timeout_seconds)
    )
    return query_set.annotate(
        subtask_verification_time=ExpressionWrapper(
            verification_seconds, output_field=IntegerField()
        )
    )
def _unique_objects_per_day(self, query_objects: QuerySet) -> QuerySet:
    """Count the unique reports per version per day.

    Args:
        query_objects: The reports to count.

    Returns:
        The unique reports grouped per version per day.
    """
    per_day = query_objects.annotate(_report_day=TruncDate("date"))
    grouped = per_day.values(self.version_field_name, "_report_day")
    return grouped.annotate(count=Count("date"))
def get_personal_notes( self, persons: QuerySet, wanted_week: Optional[CalendarWeek] = None) -> PersonalNoteQuerySet: """Get all personal notes for that register object in a specified week. The week is optional for extra lessons and events as they have own date information. Returns all linked `PersonalNote` objects, filtered by the given week for `LessonPeriod` objects, creating those objects that haven't been created yet. ..note:: Only available when AlekSIS-App-Alsijil is installed. :Date: 2019-11-09 :Authors: - Dominik George <*****@*****.**> """ # Find all persons in the associated groups that do not yet have a personal note for this lesson if isinstance(self, LessonPeriod): q_attrs = dict(week=wanted_week.week, year=wanted_week.year, lesson_period=self) elif isinstance(self, Event): q_attrs = dict(event=self) else: q_attrs = dict(extra_lesson=self) missing_persons = persons.annotate(no_personal_notes=~Exists( PersonalNote.objects.filter(person__pk=OuterRef("pk"), **q_attrs)) ).filter( member_of__in=Group.objects.filter( pk__in=self.get_groups().all()), is_active=True, no_personal_notes=True, ) # Create all missing personal notes new_personal_notes = [ PersonalNote( person=person, **q_attrs, ) for person in missing_persons ] PersonalNote.objects.bulk_create(new_personal_notes) for personal_note in new_personal_notes: personal_note.groups_of_person.set( personal_note.person.member_of.all()) return (PersonalNote.objects.filter(**q_attrs, person__in=persons). select_related(None).prefetch_related(None).select_related( "person", "excuse_type").prefetch_related("extra_marks"))
def sort_by_payment(queryset: QuerySet, sort_by: SortInputObjectType) -> QuerySet:
    """Order orders by the charge status of their latest payment.

    "Latest" means the payment with the highest pk per order; a filtered
    relation restricts the join to those payments before ordering.
    """
    newest_payment_ids = (
        queryset.exclude(payments__isnull=True)
        .annotate(payment_id=Max("payments__pk"))
        .values_list("payment_id", flat=True)
    )
    with_last_payment = queryset.annotate(
        last_payment=FilteredRelation(
            "payments", condition=Q(payments__pk__in=newest_payment_ids)
        )
    )
    return with_last_payment.order_by(
        f"{sort_by.direction}last_payment__charge_status"
    )
def apply(self, queryset: QuerySet) -> QuerySet:
    """Return a new `QuerySet` with this `Annotation` applied.

    When filters are present they are AND-combined and passed to the
    aggregate function's `filter` argument.
    """
    if not self.filters:
        return queryset.annotate(**{self.to: self.func(self.field)})
    combined_filter = reduce(
        lambda acc, nxt: acc & nxt, (f.get() for f in self.filters)
    )
    return queryset.annotate(
        **{self.to: self.func(self.field, filter=combined_filter)}
    )
def filter_by_stoken(
    stoken: t.Optional[str], queryset: QuerySet, stoken_annotation: StokenAnnotation
) -> t.Tuple[QuerySet, t.Optional[Stoken]]:
    """Annotate rows with their max stoken and keep those newer than *stoken*.

    Returns the (possibly filtered) queryset ordered by ``max_stoken``
    together with the resolved stoken object (None when *stoken* is None).
    """
    stoken_rev = get_stoken_obj(stoken)
    annotated = queryset.annotate(max_stoken=stoken_annotation).order_by("max_stoken")
    if stoken_rev is None:
        return annotated, None
    return annotated.filter(max_stoken__gt=stoken_rev.id), stoken_rev
def _get_sorting_annotation(queryset: models.QuerySet, field_name: str, annotations: dict) -> models.Case:
    """Build a `Case` expression mapping each field value to its sort index.

    The queryset is grouped by *field_name*, annotated, and ordered by
    'value'; the resulting position of each group becomes its index.
    """
    grouped = (
        queryset
        .values(field_name)
        .annotate(**annotations)
        .order_by('value')
    )
    whens = [
        models.When(**{field_name: group[field_name], "then": index})
        for index, group in enumerate(grouped)
    ]
    return models.Case(*whens, output_field=models.IntegerField())
def filter_vertrouwelijkheidaanduiding(self, base: QuerySet, value) -> QuerySet:
    """Keep authorizations whose max confidentiality level covers *value*.

    No-op when *value* is None.
    """
    if value is None:
        return base
    required_order = VertrouwelijkheidsAanduiding.get_choice(value).order
    level_expression = VertrouwelijkheidsAanduiding.get_order_expression(
        "max_vertrouwelijkheidaanduiding"
    )
    # We are filtering the Autorisatie model: an authorization qualifies when
    # its max_vertrouwelijkheidaanduiding is greater than or equal to the
    # level being checked. (When filtering data objects such as Zaak or
    # InformatieObject, the comparison direction is the other way around.)
    return base.annotate(max_vertr=level_expression).filter(
        max_vertr__gte=required_order
    )
def filter_in_range(
    queryset: QuerySet,
    latitude: float,
    longitude: float,
    range_in_meters: int,
    latitude_column_name: str = "latitude",
    longitude_column_name: str = "longitude",
    field_name: str = "earthbox",
    lookup_exp: str = "in_georange",
):
    """Filter rows to those within *range_in_meters* of the given point.

    Annotates an earthbox around (latitude, longitude) and filters rows
    whose own coordinate columns fall inside it (earthdistance-style
    lookup).
    """
    box_annotation = {
        field_name: EarthBox(LLToEarth(latitude, longitude), range_in_meters)
    }
    range_lookup = {
        f"{field_name}__{lookup_exp}": LLToEarth(
            latitude_column_name, longitude_column_name
        )
    }
    return queryset.annotate(**box_annotation).filter(**range_lookup)
def add_sound_effect_count_annotation(user: User, qs: QuerySet):
    """Annotate sound effects with play counts: total and last-30-days,
    both overall and restricted to *user*.

    Bug fix: ``my_play_count_month`` was previously defined identically to
    ``my_total_play_count`` (user filter only) — the 30-day restriction was
    missing. It now combines the user filter with the ``played_at`` window,
    mirroring how ``play_count_month`` relates to ``total_play_count``.
    """
    month_ago = timezone.now() - timezone.timedelta(days=30)
    by_user = Q(play_history__played_by=user)
    within_month = Q(play_history__played_at__gte=month_ago)
    return qs.annotate(
        my_total_play_count=Count(
            "play_history", filter=by_user, distinct=False),
        my_play_count_month=Count(
            "play_history", filter=by_user & within_month, distinct=False),
        total_play_count=Count("play_history", distinct=False),
        play_count_month=Count(
            "play_history", filter=within_month, distinct=False),
    )