def indicator_lop_target_calculated_annotation():
    """Annotates an indicator with its life-of-program target value.

    Sums targets for the entire program without taking active/inactive
    targets into account - NOT for progress, for results display only.

    Branch logic:
      - PERCENTAGE or cumulative indicator with MID_END/EVENT frequency:
        the last target by explicit ordering (customsort).
      - any other PERCENTAGE or cumulative indicator: the latest target
        by period end date.
      - otherwise: the sum of all targets for the indicator.
    """
    return models.Case(
        models.When(
            models.Q(
                models.Q(
                    models.Q(unit_of_measure_type=Indicator.PERCENTAGE) |
                    models.Q(is_cumulative=True)) &
                models.Q(
                    target_frequency__in=[Indicator.MID_END, Indicator.EVENT])),
            # last target by custom sort order wins
            then=models.Subquery(
                PeriodicTarget.objects.filter(
                    indicator=models.OuterRef('pk')).order_by(
                    '-customsort').values('target')[:1],
                output_field=models.FloatField())),
        models.When(
            models.Q(
                models.Q(unit_of_measure_type=Indicator.PERCENTAGE) |
                models.Q(is_cumulative=True)),
            # latest target by period end date wins
            then=models.Subquery(
                PeriodicTarget.objects.filter(
                    indicator=models.OuterRef('pk')).order_by(
                    '-end_date').values('target')[:1],
                output_field=models.FloatField())),
        # default: sum of all targets, grouped per indicator so the
        # subquery returns a single row
        default=models.Subquery(
            PeriodicTarget.objects.filter(indicator=models.OuterRef(
                'pk')).order_by().values('indicator').annotate(
                target_sum=models.Sum('target')).values('target_sum')[:1],
            output_field=models.FloatField()))
def get(self, request, *args, **kwargs):
    """Render the page with the react context for the requested program.

    Builds the list of the user's eligible programs (funded, with
    indicators, targets, and a defined reporting period) annotated with
    a count of TvA-capable indicators, plus serialized data for the
    program identified in the URL kwargs.
    """
    program_id = kwargs.get('program')
    # Frequencies that make an indicator TvA ("targets vs actuals") capable.
    tva_frequencies = Indicator.REGULAR_TARGET_FREQUENCIES + (
        Indicator.LOP, Indicator.MID_END)
    tva_count_subquery = models.Subquery(
        Indicator.rf_aware_objects.filter(
            program=models.OuterRef('pk'),
            target_frequency__in=tva_frequencies,
        ).order_by().values('program').annotate(
            tva_count=models.Count('pk')
        ).values('tva_count')[:1],
        output_field=models.IntegerField(),
    )
    has_targets = models.Exists(
        PeriodicTarget.objects.filter(
            indicator__program=models.OuterRef('pk')
        )
    )
    programs = (
        request.user.tola_user.available_programs
        .annotate(
            indicators_count=models.Count('indicator'),
            targets_exist=has_targets,
            tva_indicators_count=tva_count_subquery,
        )
        .filter(
            funding_status="Funded",
            targets_exist=True,
            reporting_period_start__isnull=False,
            reporting_period_end__isnull=False,
            indicators_count__gt=0,
        )
        .order_by('name')
        .values_list('pk', 'name', 'tva_indicators_count')
    )
    program_data = IPTTProgramSerializer.get_for_pk(program_id).data
    react_data = {
        'programs_list': list(programs),
        'program_data': program_data,
    }
    return self.render_to_response({'react_context': react_data})
class BaseParticipationViewSet(NestedViewSetMixin, rest_framework_bulk.BulkModelViewSet):
    """Bulk REST viewset for Participation objects.

    The queryset annotates every participation with e-mail statistics
    via correlated count subqueries (SQCount):
      - sent_emails_count: scheduled e-mails with a non-null `sent`
        timestamp for this participation's step
      - opened_emails_count: OPEN tracking events for those e-mails
      - link_clicked_in_emails_count: LINK_CLICKED tracking events
      - replied_emails_count: tracking events whose e-mail has a
        non-null `in_reply_to` (i.e. is a reply)
    """
    queryset = Participation.objects.annotate(
        sent_emails_count=SQCount(
            ScheduledEmail.objects.exclude(sent=None).filter(
                stage__step__participations=models.OuterRef('pk')).values(
                'pk').distinct()),
        opened_emails_count=SQCount(
            TrackingInfo.objects.filter(
                type=TrackingType.OPEN,
                email__scheduled__stage__step__participations=models.OuterRef(
                    'pk')).values('pk').distinct()),
        link_clicked_in_emails_count=SQCount(
            TrackingInfo.objects.filter(
                type=TrackingType.LINK_CLICKED,
                email__scheduled__stage__step__participations=models.OuterRef(
                    'pk')).values('pk').distinct()),
        # a "reply" is any tracked e-mail that has `in_reply_to` set
        replied_emails_count=SQCount(
            TrackingInfo.objects.filter(
                email__scheduled__stage__step__participations=models.OuterRef(
                    'pk')).exclude(
                email__in_reply_to=None).values('pk').distinct()),
    )
    permission_classes = (permissions.DjangoModelPermissions, )
    filter_backends = (
        rest_framework.DjangoFilterBackend,
        filters.OrderingFilter,
    )
    filter_class = ParticipationFilter
def get_max_deviations(
    self,
    submitter: UserProfile,
    exercises: Iterable[Union[BaseExercise, int]],
) -> Iterable[TModel]:
    """
    Returns the maximum deviations for the given submitter in the given
    exercises (one deviation per exercise is returned).

    The deviation may be granted to the submitter directly, or to some
    other submitter in their group.
    """
    # A deviation counts for `submitter` if its owner has submitted the
    # deviation's exercise together with `submitter` (note the two
    # separate `submitters` filters: one correlated, one literal).
    shares_submission = models.Exists(
        Submission.objects.filter(
            exercise=models.OuterRef('exercise'),
            submitters=models.OuterRef('submitter'),
        ).filter(
            submitters=submitter,
        )
    )
    candidates = (
        self.filter(
            models.Q(exercise__in=exercises)
            & (models.Q(submitter=submitter) | shares_submission)
        )
        .select_related('exercise')
        .order_by('exercise', self.max_order_by)
    )
    # The ordering puts the maximum deviation first within each exercise;
    # yield only that first row per exercise.
    last_seen_exercise_id = None
    for candidate in candidates:
        if candidate.exercise.id != last_seen_exercise_id:
            last_seen_exercise_id = candidate.exercise.id
            yield candidate
def get_evidence_count(self, qs):
    """Annotates qs with evidence_count (# of results that have evidence)
    and all_results_backed_up (True when every reported result has
    evidence, or when there are no results at all)."""
    # Results attached to this indicator (directly or via a periodic
    # target) that carry either an evidence link or a tola table.
    data_with_evidence = CollectedData.objects.filter(
        models.Q(indicator_id=models.OuterRef('pk')) |
        models.Q(periodic_target__indicator_id=models.OuterRef('pk')),
        models.Q(evidence__isnull=False) |
        models.Q(tola_table__isnull=False)
    ).order_by().values('indicator_id')
    # Coalesce to 0 so indicators with no evidence rows annotate cleanly.
    qs = qs.annotate(
        evidence_count=models.functions.Coalesce(
            models.Subquery(
                data_with_evidence.annotate(
                    total_count=models.Count('date_collected')
                ).order_by().values('total_count')[:1],
                output_field=models.IntegerField()
            ), 0)
    )
    qs = qs.annotate(
        all_results_backed_up=models.Case(
            models.When(
                # if no results, then it isn't "missing" data, so we count this as all_backed_up
                models.Q(reported_results=0) |
                models.Q(
                    models.Q(evidence_count__isnull=False) &
                    models.Q(evidence_count=models.F('reported_results'))
                ),
                then=models.Value(True)
            ),
            default=models.Value(False),
            output_field=models.BooleanField()
        )
    )
    return qs
def handle(self, *args, **options):
    """Recompute `next_event_id` for every Hobby.

    Prefers the next upcoming event (start_date today or later); falls
    back to an ongoing event (end_date today or later) for hobbies left
    without a next event. Prints summary counts.
    """
    # Prefer upcoming events over ongoing events.
    # NOTE: queryset.update() returns the number of *matched* rows, so
    # `updated_count` equals the total number of hobbies - the subquery
    # writes NULL for hobbies with no upcoming event.
    updated_count = Hobby.objects.update(next_event_id=models.Subquery(
        HobbyEvent.objects.filter(hobby=models.OuterRef('pk'),
                                  start_date__gte=date.today()).
        values_list('id').order_by('start_date')[:1]))
    self.stdout.write(f'Updated {updated_count} hobbies')
    # If there are Hobbies with only ongoing events and no upcoming events
    # then fall back to the ongoing event.
    # `fallback_updated_count` therefore counts hobbies without any
    # upcoming event (some of which may still end up NULL).
    fallback_updated_count = Hobby.objects.filter(
        next_event_id__isnull=True).update(next_event_id=models.Subquery(
            HobbyEvent.objects.filter(hobby=models.OuterRef('pk'),
                                      end_date__gte=date.today()).
            values_list('id').order_by('start_date')[:1]))
    self.stdout.write(
        f'Updated {fallback_updated_count} hobbies with fallback query')
    hobbies_without_next_event_count = Hobby.objects.filter(
        next_event_id__isnull=True).count()
    # Derived counts rely on the matched-row semantics noted above:
    # total - no-upcoming = hobbies that got an upcoming event;
    # no-upcoming - still-NULL = hobbies that got an ongoing event.
    start_date_in_future_count = updated_count - fallback_updated_count
    end_date_in_future_count = fallback_updated_count - hobbies_without_next_event_count
    self.stdout.write(
        f'{start_date_in_future_count} hobbies with upcoming next event')
    self.stdout.write(
        f'{end_date_in_future_count} hobbies with ongoing next event')
    self.stdout.write(
        f'{hobbies_without_next_event_count} hobbies without next event')
def annotate_usage(self, *statuses):
    """
    Returns the current queryset annotated with usage information from
    requirements that reference the resource.

    For each status two annotations are added: `<status>_count` (number
    of requirements) and `<status>_total` (summed amount), both coalesced
    to 0 when no matching requirements exist.
    """
    from .requirement import Requirement

    # If no statuses are given, use them all.
    selected_statuses = statuses or Requirement.Status
    # Generate the annotations for each possible status, keeping the
    # count/total pair together per status.
    annotations = {}
    for status in selected_statuses:
        prefix = status.name.lower()
        annotations['{}_count'.format(prefix)] = models.Count(
            'status', filter=models.Q(status=status))
        annotations['{}_total'.format(prefix)] = models.Sum(
            'amount', filter=models.Q(status=status))

    # This subquery fetches the count and total of all requirements for
    # the quota, correlated on consortium + resource.
    requirements = (
        Requirement.objects
        .filter(
            service__project__consortium=models.OuterRef('consortium'),
            resource=models.OuterRef('resource'),
        )
        .order_by()
        .values('service__project__consortium', 'resource')
        .annotate(**annotations)
    )
    # Apply the annotations to the current query, coalescing each one
    # from the corresponding subquery column.
    return self.annotate(**{
        name: functions.Coalesce(
            models.Subquery(requirements.values(name)),
            models.Value(0))
        for name in annotations
    })
def periods(self, periods):
    """Annotate a query with actual data sums for a set of time periods.

    For each period two annotations are added:
      - "<start>-<end>": the achieved value for the period. PERCENTAGE
        indicators get the most recent result collected up to the
        period end; others get the sum of results inside the period
        (or everything up to the period end for cumulative indicators).
      - "period_<i>": a constant label holding the period's date range.
    """
    period_annotations = {}
    for c, period in enumerate(periods):
        date_range = "{0}-{1}".format(
            period['start_date'].strftime('%Y-%m-%d'),
            period['end_date'].strftime('%Y-%m-%d')
        )
        period_annotations[date_range] = models.Case(
            models.When(
                unit_of_measure_type=Indicator.PERCENTAGE,
                # percentages: latest collected value as of the period end
                then=models.Subquery(
                    CollectedData.objects.filter(
                        indicator_id=models.OuterRef('pk'),
                        date_collected__lte=period['end_date']
                    ).order_by('-date_collected').values('achieved')[:1],
                    output_field=models.FloatField()
                )),
            # non-percentages: sum within the period; cumulative
            # indicators include everything up to the period end.
            # Grouped by indicator so the subquery yields one row.
            default=models.Subquery(
                CollectedData.objects.filter(
                    indicator_id=models.OuterRef('pk'),
                    date_collected__lte=period['end_date']
                ).filter(
                    models.Q(date_collected__gte=period['start_date']) |
                    models.Q(indicator__is_cumulative=True)
                ).order_by().values('indicator_id').annotate(
                    total=models.Sum('achieved')).values('total'),
                output_field=models.FloatField())
        )
        period_annotations["period_{0}".format(c)] = models.Value(
            date_range, output_field=models.CharField())
    return self.get_queryset().annotate(**period_annotations)
def deleteContentCb(sender, instance, **kwargs):
    """Deletion callback: cascades deletes through content references.

    Contents referencing `instance` with deleteRecursive=TRUE are
    deleted outright; contents referencing it with NO_GROUP are deleted
    only when the deletion makes one of their reference groups vanish.
    """
    from .models import ContentReference
    references = ContentReference.objects.filter(target=instance)
    other_references = ContentReference.objects.filter(~models.Q(
        target=instance))
    nogroup_references = references.filter(
        deleteRecursive=DeleteRecursive.NO_GROUP.value,
    )
    recursive_references = references.filter(
        deleteRecursive=DeleteRecursive.TRUE.value)
    # delete recursive connected contents
    sender.objects.filter(references__in=recursive_references).delete()
    nogroup_groups = set(nogroup_references.values_list("group", flat=True))
    # delete contents if group vanishes and NO_GROUP is set:
    # a content is deleted when the number of its NO_GROUP groups that
    # survive via *other* references is less than its total group count.
    # NOTE(review): both inner subqueries annotate `amount` but never
    # select it with .values("amount") - confirm the subqueries resolve
    # to the intended column.
    delete_ids = models.Subquery(
        sender.objects.filter(
            models.Q(references__in=nogroup_references)).annotate(
            all_groups=models.Subquery(
                ContentReference.objects.filter(
                    group__in=nogroup_groups,
                    source=models.OuterRef("pk"),
                ).annotate(amount=models.Count("group", distinct=True))),
            remaining_groups=models.Subquery(
                other_references.filter(
                    group__in=nogroup_groups,
                    source=models.OuterRef("pk"),
                ).annotate(amount=models.Count("group", distinct=True))),
        ).filter(remaining_groups__lt=models.F("all_groups")).values("pk"))
    sender.objects.filter(id__in=delete_ids).delete()
def get_deleted(self, model, model_db=None):
    """Return versions belonging to deleted instances of `model`.

    For each object_id with no live row in `model`'s table, the latest
    version (max pk) is selected. Uses a correlated NOT EXISTS when the
    version and model tables live in the same database on a supported
    vendor; otherwise falls back to materializing all live pks in
    Python (slow).
    """
    model_db = model_db or router.db_for_write(model)
    connection = connections[self.db]
    if self.db == model_db and connection.vendor in ("sqlite", "postgresql", "oracle"):
        pk_field_name = model._meta.pk.name
        object_id_cast_target = model._meta.get_field(pk_field_name)
        if django.VERSION >= (2, 1):
            # django 2.0 contains a critical bug that doesn't allow the code below to work,
            # fallback to casting primary keys then
            # see https://code.djangoproject.com/ticket/29142
            if django.VERSION < (2, 2):
                # properly cast autofields for django before 2.2 as it was fixed in django itself later
                # see https://github.com/django/django/commit/ac25dd1f8d48accc765c05aebb47c427e51f3255
                object_id_cast_target = {
                    "AutoField": models.IntegerField(),
                    "BigAutoField": models.BigIntegerField(),
                }.get(object_id_cast_target.__class__.__name__, object_id_cast_target)
            # cast the version's textual object_id to the model's pk type
            casted_object_id = Cast(models.OuterRef("object_id"), object_id_cast_target)
            model_qs = (
                model._default_manager
                .using(model_db)
                .filter(**{pk_field_name: casted_object_id})
            )
        else:
            # pre-2.1: cast the model pk to object_id's type instead
            model_qs = (
                model._default_manager
                .using(model_db)
                .annotate(_pk_to_object_id=Cast("pk", Version._meta.get_field("object_id")))
                .filter(_pk_to_object_id=models.OuterRef("object_id"))
            )
        # conditional expressions are being supported since django 3.0
        # DISTINCT ON works only for Postgres DB
        if connection.vendor == "postgresql" and django.VERSION >= (3, 0):
            subquery = (
                self.get_for_model(model, model_db=model_db)
                .filter(~models.Exists(model_qs))
                .order_by("object_id", "-pk")
                .distinct("object_id")
                .values("pk")
            )
        else:
            # portable form: group by object_id and take the max pk
            subquery = (
                self.get_for_model(model, model_db=model_db)
                .annotate(pk_not_exists=~models.Exists(model_qs))
                .filter(pk_not_exists=True)
                .values("object_id")
                .annotate(latest_pk=models.Max("pk"))
                .values("latest_pk")
            )
    else:
        # We have to use a slow subquery: pull every live pk into memory
        # and exclude versions whose object still exists.
        subquery = self.get_for_model(model, model_db=model_db).exclude(
            object_id__in=list(
                model._default_manager.using(model_db).values_list("pk", flat=True).order_by().iterator()
            ),
        ).values_list("object_id").annotate(
            latest_pk=models.Max("pk")
        ).order_by().values_list("latest_pk", flat=True)
    # Perform the subquery.
    return self.filter(pk__in=subquery)
def get_queryset(self):
    """Return all officers annotated with complaint/TRR statistics.

    Also primes the various lookup dicts used elsewhere on this object.
    """
    self.populate_top_percentile_dict()
    self.populate_allegation_dict()
    self.populate_award_dict()
    self.populate_history_dict()
    self.populate_badgenumber_dict()
    self.populate_salary_dict()
    self.populate_tags_dict()

    # Correlated querysets, one per statistic, keyed on the officer row.
    officer_ref = models.OuterRef('id')
    allegations = OfficerAllegation.objects.filter(officer=officer_ref)
    sustained = OfficerAllegation.objects.filter(
        officer=officer_ref, final_finding='SU')
    unsustained = OfficerAllegation.objects.filter(
        officer=officer_ref, final_finding='NS')
    disciplined = OfficerAllegation.objects.filter(
        officer=officer_ref, disciplined=True)
    trrs = TRR.objects.filter(officer=officer_ref)

    return Officer.objects.all()\
        .annotate(complaint_count=SQCount(allegations.values('id')))\
        .annotate(sustained_complaint_count=SQCount(sustained.values('id')))\
        .annotate(discipline_complaint_count=SQCount(disciplined.values('id')))\
        .annotate(annotated_trr_count=SQCount(trrs.values('id')))\
        .annotate(unsustained_complaint_count=SQCount(unsustained.values('id')))\
        .annotate(trr_datetimes=ArrayAgg('trr__trr_datetime'))\
        .annotate(cr_incident_dates=ArrayAgg('officerallegation__allegation__incident_date'))
def program_all_targets_defined_annotation():
    """Annotates a queryset of programs with whether all targets are defined
    for all indicators for that program.

    Returns a Coalesce'd count of the program's indicators that pass
    the "defined targets" filter; programs with no qualifying
    indicators annotate as 0.
    """
    # per-indicator count of its periodic targets (non-deleted indicators)
    targets_subquery = PeriodicTarget.objects.filter(
        indicator__deleted__isnull=True,
        indicator_id=models.OuterRef('pk')
    ).order_by().values('indicator_id').annotate(
        target_count=models.Count('pk')
    ).values('target_count')[:1]
    # per-program indicators, each annotated with its target count and
    # the months-based defined-targets helper annotation
    target_subquery = Indicator.objects.filter(
        deleted__isnull=True,
        program_id=models.OuterRef('pk')
    ).order_by().values('program_id').annotate(
        defined_targets=models.Subquery(
            targets_subquery,
            output_field=models.IntegerField()
        )
    ).annotate(defined_targets_months=indicator_defined_targets_months())
    return models.functions.Coalesce(
        models.Subquery(
            target_subquery.filter(
                program_get_defined_targets_filter()
            ).order_by().values('program_id').annotate(
                all_defined_targets_count=models.Count('id')
            ).values('all_defined_targets_count')[:1],
            output_field=models.IntegerField()
        ), 0)
def get_for_gq(cls, user, only_member=False):
    """Used by graphql schema.

    Returns projects visible to `user`: private projects are excluded
    unless the user is a member. Each project is annotated with the
    user's role and a membership-data JSON object used by the
    permission module. With only_member=True, only projects where the
    user has a membership are returned.
    """
    def _user_memberships():
        # Memberships of `user` correlated to the outer project row.
        return ProjectMembership.objects.filter(
            project=models.OuterRef('pk'),
            member=user,
        )

    role_subquery = models.Subquery(
        _user_memberships().order_by('role__level').values('role__type')[:1],
        output_field=models.CharField(),
    )
    badges_subquery = models.Subquery(
        _user_memberships().order_by('badges').values('badges')[:1],
        output_field=ArrayField(models.CharField()),
    )
    membership_data = JSONObject(
        user_id=models.Value(user.id),
        role=models.F('current_user_role'),
        badges=badges_subquery,
    )
    visible_projects = cls.objects.annotate(
        # For using within query filters
        current_user_role=role_subquery,
    ).annotate(
        # NOTE: This is used by permission module
        current_user_membership_data=membership_data,
        # NOTE: Exclude if project is private + user is not a member
    ).exclude(is_private=True, current_user_role__isnull=True)
    if only_member:
        return visible_projects.filter(current_user_role__isnull=False)
    return visible_projects
def get_annotated_responses_qs(include_comments=False, include_time=False):
    """Retrieve a queryset for the set of responses belonging to a set of studies.

    Each response is annotated with `current_ruling` (the action of its
    most recent consent ruling, defaulting to PENDING). Optionally also
    annotates the latest ruling's comments and timestamp.
    """
    def _latest_ruling_value(field):
        # Subquery pulling `field` from the newest ruling for the response.
        return models.Subquery(
            ConsentRuling.objects.filter(
                response=models.OuterRef("pk")
            ).order_by("-created_at").values(field)[:1])

    annotated_query = (
        Response.objects
        .prefetch_related("consent_rulings")
        .filter(completed_consent_frame=True)
        .annotate(
            current_ruling=Coalesce(
                _latest_ruling_value("action"), models.Value(PENDING)))
    )
    if include_comments:
        annotated_query = annotated_query.annotate(
            ruling_comments=Coalesce(
                _latest_ruling_value("comments"), models.Value("N/A")))
    if include_time:
        annotated_query = annotated_query.annotate(
            time_of_ruling=_latest_ruling_value("created_at"))
    return annotated_query
def create_courseregistration_periods(apps, schema_editor):
    """Data migration: create CourseRegistrationPeriod rows for existing
    registrations and relink course discounts to them.

    Discounts that cannot be matched to any registration period are
    deleted at the end.
    """
    CourseDiscount = apps.get_model('leprikon', 'CourseDiscount')
    CourseRegistration = apps.get_model('leprikon', 'CourseRegistration')
    CourseRegistrationPeriod = apps.get_model('leprikon', 'CourseRegistrationPeriod')
    SchoolYearPeriod = apps.get_model('leprikon', 'SchoolYearPeriod')
    today = date.today()
    periods = defaultdict(list)
    for period in SchoolYearPeriod.objects.all():
        # flag the first period seen for each division; used below to
        # always include it for canceled registrations
        period.first = period.school_year_division_id not in periods
        periods[period.school_year_division_id].append(period)
    for registration in CourseRegistration.objects.annotate(
            school_year_division_id=models.F(
                'subject__course__school_year_division_id')).iterator():
        # create a row per relevant period: periods ending after the
        # registration was created, and (unless canceled before the
        # period started) not past the cancellation date
        CourseRegistrationPeriod.objects.bulk_create(
            CourseRegistrationPeriod(
                registration=registration,
                period=period,
                payment_requested=(period.due_from < today if registration.
                                   payment_requested else False),
            )
            for period in periods[registration.school_year_division_id]
            if period.end >= registration.created.date() and
            (registration.canceled is None or
             period.start <= registration.canceled.date() or
             period.first))
    # relink each discount to the matching registration period (if any)
    CourseDiscount.objects.update(registration_period_id=models.Subquery(
        CourseRegistrationPeriod.objects.filter(
            registration_id=models.OuterRef('registration_id'),
            period_id=models.OuterRef('period_id'),
        ).values('id')[:1]), )
    # drop discounts with no matching registration period
    invalid_course_discounts = CourseDiscount.objects.filter(
        registration_period=None)
    invalid_course_discounts.delete()
def annotate_for_analysis_pillar_summary(cls, qs):
    """Annotate an analysis-pillar queryset with entry statistics.

    Adds:
      - dragged_entries: distinct entries pulled into the pillar's
        analytical statements (leads published on/before the analysis
        end date)
      - discarded_entries: distinct entries explicitly discarded from
        the pillar (same lead-date restriction)
      - analyzed_entries: sum of the two
    Also prefetches analytical statements with their entry counts.
    """
    analytical_statement_prefech = models.Prefetch(
        'analyticalstatement_set',
        queryset=(AnalyticalStatement.objects.annotate(
            entries_count=models.Count('entries', distinct=True))))
    return qs\
        .prefetch_related(analytical_statement_prefech)\
        .annotate(
            dragged_entries=models.functions.Coalesce(
                models.Subquery(
                    AnalyticalStatement.objects.filter(
                        analysis_pillar=models.OuterRef('pk')
                    ).order_by().values('analysis_pillar').annotate(count=models.Count(
                        'entries', distinct=True,
                        filter=models.Q(entries__lead__published_on__lte=models.OuterRef('analysis__end_date'))))
                    .values('count')[:1],
                    output_field=models.IntegerField(),
                ), 0),
            discarded_entries=models.functions.Coalesce(
                models.Subquery(
                    DiscardedEntry.objects.filter(
                        analysis_pillar=models.OuterRef('pk')
                    ).order_by().values('analysis_pillar__analysis').annotate(count=models.Count(
                        'entry', distinct=True,
                        filter=models.Q(entry__lead__published_on__lte=models.OuterRef('analysis__end_date'))))
                    .values('count')[:1],
                    output_field=models.IntegerField(),
                ), 0),
            analyzed_entries=models.F('dragged_entries') + models.F('discarded_entries'),
        )
def annotate_with_app_statistics(self) -> QuerySet[User]:
    """Annotate each user with app-usage counters.

    Adds three integer annotations, each computed via a correlated
    subquery on the User table:
      - pets_likes_count: pet choices marked as favorite
      - pets_dislikes_count: pet choices not marked as favorite
      - pets_getpet_requests_count: the user's GetPet requests
    """
    pets_likes_count = User.objects.annotate(pets_likes_count=models.Count(
        'users_pet_choices',
        filter=models.Q(users_pet_choices__is_favorite=True))).filter(
            pk=models.OuterRef('pk'))
    pets_dislikes_count = User.objects.annotate(
        pets_dislikes_count=models.Count(
            'users_pet_choices',
            filter=models.Q(users_pet_choices__is_favorite=False))).filter(
                pk=models.OuterRef('pk'))
    pets_getpet_requests_count = User.objects.annotate(
        pets_getpet_requests_count=models.Count(
            'get_pet_requests',
        )).filter(pk=models.OuterRef('pk'))
    return self.annotate(
        pets_likes_count=models.Subquery(
            pets_likes_count.values('pets_likes_count'),
            # bug fix: this subquery yields a count, not a timestamp -
            # was erroneously output_field=models.DateTimeField()
            output_field=models.IntegerField()),
        pets_dislikes_count=models.Subquery(
            pets_dislikes_count.values('pets_dislikes_count'),
            output_field=models.IntegerField()),
        pets_getpet_requests_count=models.Subquery(
            pets_getpet_requests_count.values(
                'pets_getpet_requests_count'),
            output_field=models.IntegerField()),
    )
def persons_with_multiple_jobs(self, jobs=None):
    """Take only persons with multiple jobs that match the given jobs.

    For every person:
    > Filter all conditions and validate it empty
      ===> Does not hold positions outside the given set.
    > Annotate each position in the given jobs.
    > Take only the persons with '1' value on all the position
      annotations ===> Exactly the given positions, once each.
    """
    HospitalWorker = apps.get_model(Covid19Config.app_name, "HospitalWorker")
    not_given_job_condition = models.Q()
    if jobs is not None:
        # Defining the given jobs conditions for matching worker:
        # each given job must be annotated with exactly 1.
        matching_workers_conditions = {job: 1 for job in jobs}
        # Value for job existence annotation per each person.
        jobs_existence_annotation = {
            job: models.Count(
                models.Subquery(
                    HospitalWorker.objects.filter(
                        position=job,
                        person=models.OuterRef(
                            "person")).values("position")))
            for job in jobs
        }
        # Build the condition detecting a person holding a job outside
        # the given set.
        for job in jobs:
            not_given_job_condition &= ~models.Q(position=job)
        # Search matching workers.
        matching_workers = HospitalWorker.objects.annotate(
            not_matching_jobs=models.Count(
                models.Subquery(
                    HospitalWorker.objects.filter(
                        not_given_job_condition,
                        person=models.OuterRef("person")).values(
                        "person"))),
            **jobs_existence_annotation).filter(
            # Filter only the workers with 0 not matching jobs.
            not_matching_jobs=0,
            **matching_workers_conditions).values("person")
    else:
        # All of the workers are in potential matching workers.
        matching_workers = HospitalWorker.objects.all().values("person")
    # NOTE(review): this groups by (person, position, department) and
    # keeps groups with count >= 2, i.e. duplicate identical worker
    # rows - confirm this matches the "multiple jobs" intent rather
    # than counting distinct positions per person.
    multiple_jobs_workers = HospitalWorker.objects.filter(
        person__in=matching_workers).values(
        "person", "position",
        "department").annotate(jobs=models.Count("person")).filter(
        jobs__gte=2, ).values("person")
    return self.filter(id__in=multiple_jobs_workers)
def get_deleted(self, model, model_db=None):
    """Return the latest version of each deleted instance of `model`.

    Like reversion's get_deleted, but aware of django-safedelete: a
    SafeDeleteModel instance counts as deleted when its `deleted`
    timestamp is set, even though the row still exists. Uses a
    correlated NOT EXISTS on supported same-database vendors, otherwise
    falls back to materializing live pks in Python (slow).
    """
    model_db = model_db or router.db_for_write(model)
    connection = connections[self.db]
    # If safedelete is being used then this function helps
    # determine if our model is a SafeDeleteModel. If not installed
    # then never is.
    try:
        from safedelete.models import is_safedelete_cls
    except ImportError:
        # bug fix: the fallback must accept the model class argument
        # like the real helper does - the original zero-argument lambda
        # raised TypeError when called as is_safedelete_cls(model).
        def is_safedelete_cls(cls):
            return False
    if self.db == model_db and connection.vendor in ("sqlite", "postgresql", "oracle"):
        if is_safedelete_cls(model):
            # Hack: added this hack to account for safe-delete
            # We need to check not that the instance doesn't exist at all, but that it doesn't exist
            # with a `deleted=null` field (it will still exist but deleted field will be set to datetime of
            # safedelete)
            model_qs = (model._default_manager.using(model_db).annotate(
                _pk_to_object_id=Cast(
                    "pk", Version._meta.get_field("object_id"))).filter(
                    _pk_to_object_id=models.OuterRef("object_id"),
                    deleted__isnull=True))
        else:
            model_qs = (model._default_manager.using(model_db).annotate(
                _pk_to_object_id=Cast(
                    "pk", Version._meta.get_field("object_id"))).filter(
                    _pk_to_object_id=models.OuterRef("object_id")))
        # group by object_id and keep the newest version of each
        subquery = (self.get_for_model(model, model_db=model_db).annotate(
            pk_not_exists=~models.Exists(model_qs)).filter(
                pk_not_exists=True).values("object_id").annotate(
                    latest_pk=models.Max("pk")).values("latest_pk"))
    else:
        if is_safedelete_cls(model):
            # Hack: added this hack to account for safe-delete
            subquery = self.get_for_model(
                model, model_db=model_db).exclude(
                    object_id__in=list(
                        model._default_manager.using(model_db).filter(
                            deleted__isnull=True).values_list(
                                "pk", flat=True).order_by().iterator()),
                ).values_list("object_id").annotate(
                    latest_pk=models.Max("pk")).order_by().values_list(
                        "latest_pk", flat=True)
        else:
            # We have to use a slow subquery.
            subquery = self.get_for_model(
                model, model_db=model_db).exclude(
                    object_id__in=list(
                        model._default_manager.using(model_db).values_list(
                            "pk", flat=True).order_by().iterator()),
                ).values_list("object_id").annotate(
                    latest_pk=models.Max("pk")).order_by().values_list(
                        "latest_pk", flat=True)
    # Perform the subquery.
    return self.filter(pk__in=subquery)
def qs_with_widget_type(cls):
    """Return the exportable queryset annotated with `widget_type`.

    The type is looked up from the Widget matching this row's
    widget_key within the same analysis framework.
    """
    # TODO: maybe use properties?
    matching_widget = Widget.objects.filter(
        key=models.OuterRef('widget_key'),
        analysis_framework=models.OuterRef('analysis_framework'),
    )
    return cls.objects.annotate(
        widget_type=models.Subquery(
            matching_widget.values('widget_id')[:1],
            output_field=models.CharField(),
        )
    )
def indicator_lop_actual_progress_annotation():
    """Actual progress:
        - only data associated with active periodic targets
        - summed if NUMBER, latest if PERCENTAGE
    """
    return models.Case(
        # PERCENTAGE + (completed LOP or MID_END/EVENT): latest result
        models.When(
            models.Q(
                models.Q(
                    models.Q(
                        models.Q(target_frequency=Indicator.LOP) &
                        # models.Q(program__reporting_period_end__lte=models.functions.Now())
                        models.Q(program__reporting_period_end__lt=UTCNow())) |
                    models.Q(target_frequency__in=[
                        Indicator.MID_END, Indicator.EVENT
                    ])) &
                models.Q(unit_of_measure_type=Indicator.PERCENTAGE)),
            then=models.Subquery(
                Result.objects.filter(
                    periodic_target__isnull=False,
                    indicator=models.OuterRef('pk')).order_by(
                    '-date_collected').values('achieved')[:1])),
        # non-PERCENTAGE + (completed LOP or MID_END/EVENT): sum of results
        models.When(
            models.Q(
                models.Q(
                    models.Q(target_frequency=Indicator.LOP) &
                    # models.Q(program__reporting_period_end__lte=models.functions.Now())
                    models.Q(program__reporting_period_end__lt=UTCNow())) |
                models.
                Q(target_frequency__in=[Indicator.MID_END, Indicator.EVENT])),
            then=models.Subquery(
                Result.objects.filter(
                    periodic_target__isnull=False,
                    indicator=models.OuterRef('pk')).order_by().values(
                    'indicator').annotate(actual_sum=models.Sum(
                        'achieved')).values('actual_sum')[:1])),
        # PERCENTAGE + time-aware frequency: latest result among
        # already-completed periods
        models.When(models.Q(
            models.Q(
                target_frequency__in=[f[0] for f in TIME_AWARE_FREQUENCIES]) &
            models.Q(unit_of_measure_type=Indicator.PERCENTAGE)),
            then=models.Subquery(
                Result.objects.filter(
                    models.Q(periodic_target__isnull=False) &
                    models.Q(indicator=models.OuterRef('pk')) &
                    models.Q(periodic_target__end_date__lt=UTCNow())
                ).order_by('-date_collected').values('achieved')[:1])),
        # non-PERCENTAGE + time-aware frequency: sum of results in
        # already-completed periods
        models.When(
            target_frequency__in=[f[0] for f in TIME_AWARE_FREQUENCIES],
            then=models.Subquery(
                Result.objects.filter(
                    models.Q(periodic_target__isnull=False) &
                    models.Q(indicator=models.OuterRef('pk')) &
                    # models.Q(periodic_target__end_date__lt=models.functions.Now())
                    models.Q(periodic_target__end_date__lt=UTCNow())).order_by(
                ).values('indicator').annotate(actual_sum=models.Sum(
                    'achieved')).values('actual_sum')[:1])),
        # no branch matched: no computable actual progress for this indicator
        default=models.Value(None),
        output_field=models.DecimalField(decimal_places=2))
def with_event_status(self):
    """Annotate each mail with opened/delivered/clicked booleans.

    Each flag is an EXISTS test against the mail's tracking events.
    """
    def _event_exists(event_name):
        return models.Exists(
            Event.objects.filter(mail=models.OuterRef("id"),
                                 name=event_name))

    return self.annotate(
        calculated_has_opened=_event_exists(EVENT_TYPES.OPENED),
        calculated_has_delivered=_event_exists(EVENT_TYPES.DELIVERED),
        calculated_has_clicked=_event_exists(EVENT_TYPES.CLICKED),
    )
def debtors(self, level):
    """Return rows from other levels whose credit for `level` is failing.

    Annotates each row (excluding the given level) with the id and
    score of its Credit for a course at `level`, keeping only scores
    of 2 or lower.
    """
    from contests.models import Credit
    level_credits = Credit.objects.filter(
        course__level=level, user_id=models.OuterRef('user_id'))
    return (
        self.filter(~models.Q(level=level))
        .annotate(credit_id=models.Subquery(level_credits.values('id')))
        .annotate(credit_score=models.Subquery(level_credits.values('score')))
        .filter(credit_score__lte=2)
    )
def with_credits(self):
    """Annotate each row with the id and score of its matching Credit.

    The credit is matched on the row's own level and user.
    """
    from contests.models import Credit
    matching_credits = Credit.objects.filter(
        course__level=models.OuterRef('level'),
        user_id=models.OuterRef('user_id'))
    return (
        self.annotate(
            credit_id=models.Subquery(matching_credits.values('id')))
        .annotate(
            credit_score=models.Subquery(matching_credits.values('score')))
    )
def with_weekend_sales(self):
    """Annotate each row with summed weekend order line amounts.

    Matches orders in the row's year/month whose created_at falls on a
    weekend (Django week_day: 7 = Saturday, 1 = Sunday).
    """
    weekend_orders = Order.objects.filter(
        created_at__year=models.OuterRef('year'),
        created_at__month=models.OuterRef('month'),
        created_at__week_day__in=[7, 1],
    )
    summed_amounts = weekend_orders.values_list(
        models.Func('lines__gross_amount', function='SUM'),
    )
    return self.annotate(weekend_sales=models.Subquery(summed_amounts))
def with_submitted_response_id(self):
    """Annotate each row with the pk of the most recently modified
    submitted survey response for its survey/grantee pair."""
    from surveys.models import SurveyResponse
    latest_submitted_pk = (
        SurveyResponse.objects.filter(
            survey=models.OuterRef('survey'),
            organisation=models.OuterRef('grantee'),
            submitted__isnull=False,
        )
        .order_by('-modified')
        .values('pk')[:1]
    )
    return self.annotate(response_id=models.Subquery(latest_submitted_pk))
def links_by_user(username, user=None):
    """Return `username`'s posts, newest first, flagged for the viewer.

    is_reacted / is_added indicate whether `user` has reacted to or
    re-added each post.
    """
    reacted_by_viewer = Reaction.objects.filter(
        post=models.OuterRef('pk'), owner=user)
    added_by_viewer = Post.objects.filter(
        origin=models.OuterRef('pk'), owner=user)
    return (
        Post.objects.filter(owner__user__username=username)
        .annotate(is_reacted=models.Exists(queryset=reacted_by_viewer))
        .annotate(is_added=models.Exists(queryset=added_by_viewer))
        .order_by('-created_at')
    )
def feedback_filter(self, queryset, name, value):
    """Keep only categories that have event feedback for their summit,
    annotated with the average feedback rate."""
    matching_feedback = EventFeedback.objects.filter(
        event__summit__id=models.OuterRef('summit__id'),
        event__category__id=models.OuterRef('id'))
    with_feedback = queryset.annotate(
        has_feedback=models.Exists(matching_feedback)
    ).filter(has_feedback=True)
    return with_feedback.annotate(
        rate=SubQueryAvg(matching_feedback, field="rate"))
def populate_latest_revision(apps, schema_editor):
    """Data migration: point every Page at its newest Revision.

    The newest revision is picked by created_at (pk as tie-breaker),
    matching on content type and the page pk cast to the revision's
    string object_id.
    """
    Page = apps.get_model("wagtailcore.Page")
    Revision = apps.get_model("wagtailcore.Revision")
    newest_revision_pk = (
        Revision.objects.filter(
            content_type_id=models.OuterRef("content_type_id"),
            object_id=Cast(models.OuterRef("pk"), models.CharField()),
        )
        .order_by("-created_at", "-id")
        .values("pk")[:1]
    )
    Page.objects.all().update(
        latest_revision_id=models.Subquery(newest_revision_pk))
def with_gross_total_sales(self):
    """Annotate each row with the gross total of its month's orders.

    Orders are matched on the row's year/month, grouped by extracted
    year and month, and their line gross amounts summed.
    """
    monthly_orders = Order.objects.filter(
        created_at__year=models.OuterRef('year'),
        created_at__month=models.OuterRef('month'),
    )
    monthly_totals = (
        monthly_orders
        .values(
            year=ExtractYear('created_at'),
            month=ExtractMonth('created_at'),
        )
        .annotate(gross_total=models.Sum('lines__gross_amount'))
        .values('gross_total')
    )
    return self.annotate(gross_total_sales=models.Subquery(monthly_totals))