def anotarPracticas(practicas, **kwargs):
    """Annotate a queryset of practices with scheduling (turno) data.

    Boolean flags in ``kwargs`` toggle each annotation group:
    ``programada_por`` (who scheduled it), ``horario_turno`` /
    ``duracion_turno`` (slot start/end and duration) and
    ``reprogramaciones`` (how many times it was rescheduled).
    """
    qs = Estado.anotarPracticas(practicas, **kwargs)
    # Newest Programada row for each practice (correlated subquery).
    ultimo_turno = Programada.objects.filter(
        practica=models.OuterRef("id")).order_by("-id")

    def columna(nombre):
        # Subquery yielding one column of the latest Programada row.
        return models.Subquery(ultimo_turno.values(nombre)[:1])

    quiere_duracion = kwargs.get("duracion_turno", False)

    if kwargs.get("programada_por", False):
        qs = qs.annotate(
            id_programada_por=columna("usuario__id"),
            nombre_programada_por=columna("usuario__username"),
        )
    # Duration needs the start/end pair, so either flag pulls them in.
    if quiere_duracion or kwargs.get("horario_turno", False):
        qs = qs.annotate(inicio_turno=columna("inicio"))
        qs = qs.annotate(finalizacion_turno=columna("finalizacion"))
    if quiere_duracion:
        qs = qs.annotate(
            duracion_turno=models.ExpressionWrapper(
                models.F("finalizacion_turno") - models.F("inicio_turno"),
                models.DurationField(),
            ))
    if kwargs.get("reprogramaciones", False):
        # One Programada row is the original booking; the rest are reschedules.
        qs = qs.annotate(
            reprogramaciones=models.ExpressionWrapper(
                models.Count("estado__programada") - 1,
                models.IntegerField(),
            ))
    return qs
def calculate_score(self):
    """Annotate each row with an average JSON "score".

    Sums the ``score`` key (cast to float) from the ``src`` JSON of both
    direct content and content reached via ``tree_channels``, counts the
    rows contributing to each, then divides combined score by combined
    count.

    Returns:
        QuerySet: annotated with ``score_content``, ``total_content``,
        ``score_channels``, ``total_channels``, ``sum_content``,
        ``sum_tota`` and ``average``.
    """
    # NOTE(review): 'sum_tota' looks like a typo for 'sum_total', but the
    # annotation name is part of the queryset's surface and may be read
    # downstream — confirm before renaming.
    # NOTE(review): 'average' divides by 'sum_tota'; what happens when the
    # divisor is 0 depends on the database backend — verify.
    return self.annotate(
        score_content=Coalesce(
            Sum(
                Cast(KeyTextTransform("score", "content__src"),
                     models.FloatField())), 0),
        total_content=Count('content'),
        score_channels=Coalesce(
            Sum(
                Cast(
                    KeyTextTransform("score", "tree_channels__content__src"),
                    models.FloatField())), 0),
        total_channels=Count('tree_channels__content'),
    ).annotate(
        sum_content=models.ExpressionWrapper(
            F('score_content') + F('score_channels'),  # NOQA
            output_field=models.FloatField()),
        sum_tota=models.ExpressionWrapper(
            F('total_content') + F('total_channels'),
            output_field=models.FloatField()),
    ).annotate(average=models.ExpressionWrapper(
        F('sum_content') / F('sum_tota'),  # NOQA
        output_field=models.FloatField()))
def with_active_flag(self):
    """Annotate each result with an ``is_active`` field.

    ``is_active`` is truthy (as an integer) for the most recent entry
    of each key combination; when the model has no KEY_FIELDS there is
    a single current row.
    """
    if self.model.KEY_FIELDS:
        active = models.Q(pk__in=self._current_ids_subquery())
    else:
        active = models.Q(pk=self.model.current().pk)
    return self.get_queryset().annotate(
        is_active=models.ExpressionWrapper(
            active,
            output_field=models.IntegerField(),
        ))
def anotarPracticas(practicas, **kwargs):
    """Annotate a queryset of practices with completion (realización) data.

    Boolean flags in ``kwargs`` toggle each annotation group:
    ``realizada_por`` (who performed it) and ``horario_realizacion`` /
    ``duracion_realizacion`` (start/end timestamps and duration).
    """
    qs = Estado.anotarPracticas(practicas, **kwargs)
    # Newest Realizada row for each practice (correlated subquery).
    ultima = Realizada.objects.filter(
        practica=models.OuterRef("id")).order_by("-id")

    def columna(nombre):
        # Subquery yielding one column of the latest Realizada row.
        return models.Subquery(ultima.values(nombre)[:1])

    quiere_duracion = kwargs.get("duracion_realizacion", False)

    if kwargs.get("realizada_por", False):
        qs = qs.annotate(
            id_realizada_por=columna("usuario__id"),
            nombre_realizada_por=columna("usuario__username"),
        )
    # Duration needs the start/end pair, so either flag pulls them in.
    if quiere_duracion or kwargs.get("horario_realizacion", False):
        qs = qs.annotate(inicio_realizacion=columna("inicio"))
        qs = qs.annotate(finalizacion_realizacion=columna("finalizacion"))
    if quiere_duracion:
        qs = qs.annotate(
            duracion_realizacion=models.ExpressionWrapper(
                models.F("finalizacion_realizacion")
                - models.F("inicio_realizacion"),
                models.DurationField(),
            ))
    return qs
def get_annotations(
    aggregation: str,
    aggregation_field: str = None,
    percentile: str = None,
    queryset: models.QuerySet = None,
    additional_filter: str = None,
) -> dict:
    """Build the annotation dict for one aggregation request.

    Args:
        aggregation: one of the ``Aggregation`` choices.
        aggregation_field: source field, required for everything except
            COUNT and PERCENT; may traverse relations with ``__``.
        percentile: percentile value, required for PERCENTILE.
        queryset: queryset whose model is inspected for PERCENT and
            PERCENTILE.
        additional_filter: filter string, required for PERCENT.

    Returns:
        dict mapping annotation names to ORM expressions; ``"value"``
        is always present.

    Raises:
        ValidationError: when a required parameter is missing or the
            aggregation name is unknown (code 422).
    """
    if aggregation == Aggregation.COUNT:
        return {"value": models.Count('id')}
    if aggregation == Aggregation.PERCENT:
        if not additional_filter:
            raise ValidationError(
                {"error": "'additionalFilter' is required for 'aggregation=percent'"},
                code=422)
        complex_filter = ComplexFilter(model=queryset.model)
        additional_query, _ = complex_filter.generate_from_string(additional_filter)
        if not additional_query:
            raise ValidationError({"error": "Additional filter cannot be empty"}, code=422)
        # Multiply by 1.0 so the database does float, not integer, division.
        return {
            "numerator": CountIf(additional_query),
            "denominator": models.Count("id"),
            "value": models.ExpressionWrapper(
                models.F("numerator") * 1.0 / models.F("denominator"),
                output_field=models.FloatField())
        }
    if not aggregation_field:
        raise ValidationError(
            {"error": f"'aggregationField' is required for 'aggregation={aggregation}'"},
            code=422)
    if aggregation == Aggregation.DISTINCT:
        return {"value": models.Count(aggregation_field, distinct=True)}
    if aggregation == Aggregation.SUM:
        return {"value": models.Sum(aggregation_field)}
    if aggregation == Aggregation.AVERAGE:
        return {"value": models.Avg(aggregation_field)}
    if aggregation == Aggregation.MIN:
        return {"value": models.Min(aggregation_field)}
    if aggregation == Aggregation.MAX:
        return {"value": models.Max(aggregation_field)}
    if aggregation == Aggregation.PERCENTILE:
        if not percentile:
            raise ValidationError(
                {"error": "'percentile' is required for 'aggregation=percentile'"},
                code=422)
        model: models.Model = queryset.model
        field = None
        # Resolve the (possibly related) model field so we can decide
        # whether Percentile needs an explicit float output type.
        for field_name in aggregation_field.split("__"):
            if field is None:
                field = model._meta.get_field(field_name)
            else:
                # BUGFIX: follow relations through the related model's
                # _meta; getattr(field, field_name) on a Field object can
                # never resolve the next segment of an "a__b" path.
                field = field.related_model._meta.get_field(field_name)
        if field.get_internal_type() != "FloatField":
            return {"value": Percentile(aggregation_field, percentile,
                                        output_field=models.FloatField())}
        return {"value": Percentile(aggregation_field, percentile)}
    raise ValidationError({"error": "Unknown value for param 'aggregation'"}, code=422)
def test_serialize_complex_func_index(self):
    """MigrationWriter serializes a functional index mixing Func, Case,
    ExpressionWrapper and OrderBy expressions to deconstructible source."""
    index = models.Index(
        models.Func("rating", function="ABS"),
        models.Case(
            models.When(name="special", then=models.Value("X")),
            default=models.Value("other"),
        ),
        models.ExpressionWrapper(
            models.F("pages"),
            output_field=models.IntegerField(),
        ),
        models.OrderBy(models.F("name").desc()),
        name="complex_func_index",
    )
    string, imports = MigrationWriter.serialize(index)
    self.assertEqual(
        string,
        "models.Index(models.Func('rating', function='ABS'), "
        "models.Case(models.When(name='special', then=models.Value('X')), "
        "default=models.Value('other')), "
        "models.ExpressionWrapper("
        "models.F('pages'), output_field=models.IntegerField()), "
        # The serializer emits a nested OrderBy here; the doubled wrapper
        # is the asserted, expected output.
        "models.OrderBy(models.OrderBy(models.F('name'), descending=True)), "
        "name='complex_func_index')",
    )
    self.assertEqual(imports, {"from django.db import models"})
def get_queryset(self):
    """Snapshots whose retention window has already elapsed."""
    # Expiry moment = snapshot date + its policy's retention duration.
    expiry = models.ExpressionWrapper(
        F('date') + F('retention_policy__duration'),
        output_field=models.DateTimeField())
    base = super(ExpiredSnapshotManager, self).get_queryset()
    return base.annotate(expires=expiry).filter(expires__lt=timezone.now())
def with_calculated(self):
    """Annotate the queryset with allocated hours and time spent."""
    from wip.models import TaskAssignee
    # Total allocated hours per task (correlated subquery on task_id).
    per_task_allocation = (
        TaskAssignee.objects
        .filter(task_id=models.OuterRef('task_id'))
        .values('task_id')
        .annotate(total=models.Sum('allocated_hours'))
        .order_by('task_id')
        .values('total')
    )
    # Duration of a single time entry (ended - started).
    entry_duration = models.ExpressionWrapper(
        models.F('task__time_entries__ended_at')
        - models.F('task__time_entries__started_at'),
        output_field=models.fields.DurationField()
    )
    return self.annotate(
        qs_allocated_hours=models.Subquery(per_task_allocation),
        qs_time_spent_hours=models.Sum(entry_duration)
    )
def stats(self):
    """Per-category counts with the number of unread items in each."""
    grouped = self.order_by('category').values('category')
    grouped = grouped.annotate(count=models.Count('id'))
    # unread = total rows minus the sum of the has_read flags.
    return grouped.annotate(
        unread=models.ExpressionWrapper(
            models.F('count') - models.Sum('has_read'),
            output_field=models.IntegerField()))
def order_queryset_by_z_coord_desc(queryset, geometry_field="location"):
    """Order a queryset by its point geometry's z coordinate, descending."""
    # ST_Z extracts the z coordinate of a point geometry in the database.
    z_value = models.ExpressionWrapper(
        models.Func(geometry_field, function="ST_Z"),
        output_field=models.FloatField(),
    )
    return queryset.annotate(z_coord=z_value).order_by("-z_coord")
def _filter_runs_by_delay_field(self, field: str) -> JobRunQuerySet:
    """Job runs whose retention delay (taken from *field* on the job's
    history retention policy) has elapsed by ``self.clean_time``."""
    runs = JobRun.objects.all()
    runs = runs.annotate(
        delay=models.F(f"job__history_retention_policy__{field}"))
    runs = runs.annotate(
        delay_elapsed_at=models.ExpressionWrapper(
            models.F("started_at") + models.F("delay"),
            output_field=models.DateTimeField(),
        ))
    return runs.filter(delay_elapsed_at__lte=self.clean_time)  # type: ignore
def with_time_spent(self):
    """Annotate the queryset with the summed duration of all time entries."""
    # Duration of one time entry (ended - started).
    entry_duration = models.ExpressionWrapper(
        models.F('tasks__time_entries__ended_at')
        - models.F('tasks__time_entries__started_at'),
        output_field=models.fields.DurationField())
    return self.annotate(qs_time_spent=models.Sum(entry_duration))
def receita(self):
    """Total revenue over this instance's products (price * quantity
    summed across all sale items), as a Decimal; zero when empty."""
    linha = models.ExpressionWrapper(
        F('itens_vendas__valor') * F('itens_vendas__quantidade'),
        output_field=models.DecimalField(
            max_digits=10, decimal_places=2, default=Decimal('0.00')))
    soma = self.produtos.annotate(receita=linha).aggregate(
        Sum('receita'))['receita__sum']
    # Sum is None (or 0) when there are no sale items.
    return soma or Decimal('0.0')
def get_order_total(self, include_delivery=True):
    """Total value of the order's items, optionally plus delivery.

    Args:
        include_delivery: add ``self.delivery_price`` when True.

    Returns:
        Decimal: the order total (0 when there are no items).

    BUGFIX: the previous version round-tripped the total through float
    (``float(total) + float(self.delivery_price)``) before converting
    back to Decimal, injecting binary floating-point error into a
    currency amount. Decimal arithmetic is now kept end to end.
    """
    total = self.orderitem_set.aggregate(total=models.Sum(
        models.ExpressionWrapper(
            models.F('sell_price') * models.F('quantity'),
            output_field=models.DecimalField())))['total']
    if total is None:
        total = Decimal('0')
    if include_delivery:
        # str() first so a float delivery_price converts via its decimal
        # representation instead of its binary one.
        total += Decimal(str(self.delivery_price))
    return Decimal(total)
def get_queryset(self):
    """Sensor readings newer than (each reading's timestamp window):
    rows with timestamp greater than the latest timestamp minus
    DELTA_MINUTES, resolved lazily via a Subquery."""
    # annotate & order_by desc instead of aggregate & Max to make it work with Subquery
    # aggregate returns a dict which doesn't work with Subquery
    # doesn't hit db while aggregate hits it immediately
    max_stamp = api.models.SensorData.objects.annotate(
        max_stamp=db.ExpressionWrapper(
            db.F('timestamp') - dt.timedelta(minutes=self.DELTA_MINUTES),
            output_field=db.DateTimeField())).order_by('-max_stamp').values('max_stamp')[:1]
    # doesn't hit db — the Subquery is embedded in the outer query's SQL.
    dataset = api.models.SensorData.objects.filter(timestamp__gt=db.Subquery(max_stamp))
    return dataset
def indicator_lop_percent_met_annotation():
    """annotates an indicator with the percent met using:
        - lop_target (currently lop_target field, but will shift to lop_target_calculated)
        - lop_actual"""
    # NULL when either side of the ratio is missing.
    missing_data = (models.Q(lop_target_calculated__isnull=True)
                    | models.Q(lop_actual__isnull=True))
    percent_met = models.ExpressionWrapper(
        models.F('lop_actual') / models.F('lop_target_calculated'),
        output_field=models.FloatField())
    return models.Case(
        models.When(missing_data, then=models.Value(None)),
        default=percent_met)
def indicator_lop_percent_met_progress_annotation():
    """percent met progress:
        actual progress (see above) / target progress (see above)"""
    # Only compute the ratio when both progress values are present.
    both_present = models.Q(
        models.Q(lop_actual_progress__isnull=False)
        & models.Q(lop_target_progress__isnull=False))
    ratio = models.ExpressionWrapper(
        models.F('lop_actual_progress') / models.F('lop_target_progress'),
        output_field=models.FloatField())
    return models.Case(
        models.When(both_present, then=ratio),
        default=models.Value(None))
def uptime(cls, period=24):
    """Per-server uptime fraction over a trailing window.

    The count of heartbeat rows per server is divided by ``60 * period``,
    i.e. one row per minute is assumed to mean fully up — TODO confirm
    against whatever writes these rows.

    Args:
        period: look-back window in hours. Defaults to 24, preserving
            the previously hard-coded behavior.

    Returns:
        list of dicts, each with 'server_fqdn' and a float 'uptime'.
    """
    time_threshold = now() - timedelta(hours=period)
    result = cls.objects \
        .filter(timestamp__gt=time_threshold) \
        .values('server_fqdn') \
        .annotate(uptime=models.ExpressionWrapper(
            models.Count('server_fqdn'),
            output_field=models.FloatField()
        ) / (60.0 * period))
    # Materialize the lazy queryset before returning.
    return list(result)
def target_percent_met_annotation():
    """ value for the % met column in the results table
        logic is explained in target_actual_annotation"""
    # % met is undefined when the target is 0 but data was reported,
    # and when neither a target nor data exists.
    zero_target = models.Q(
        models.Q(target=0) & models.Q(actual__isnull=False))
    nothing_defined = models.Q(
        models.Q(target__isnull=True) & models.Q(actual__isnull=True))
    ratio = models.ExpressionWrapper(
        models.F('actual') / models.F('target'),
        output_field=models.FloatField())
    return models.Case(
        models.When(zero_target, then=models.Value(None)),
        models.When(nothing_defined, then=models.Value(None)),
        default=ratio)
def habiles(self): """ Retorna los casos que han superado su fecha limite de acuerdo a los dias habiles establecidos """ # Falta hacer un filtro por fines de semana annotate = self.annotate( fecha_vencimiento=models.ExpressionWrapper( models.F('fecha_ingreso_habil') + datetime.timedelta(days=self.model.DIAS_PARA_EXPIRAR), output_field=models.DateField() ) ).filter(fecha_vencimiento__lt=timezone.now()).exclude(fecha_vencimiento__in=get_festivos(timezone.now().year)) # return (self.nuevos() | annotate).distinct() return self.filter(valido=True).exclude(cerrado=True)
def indicator_lop_met_real_annotation():
    """for a reporting indicator, determines how close actual values are
    to target values"""
    # NULL when either the target sum or the actual sum is missing.
    missing = (models.Q(lop_target_sum__isnull=True)
               | models.Q(lop_actual_sum__isnull=True))
    ratio = models.ExpressionWrapper(
        models.F('lop_actual_sum') / models.F('lop_target_sum'),
        output_field=models.FloatField()
    )
    return models.Case(
        models.When(missing, then=models.Value(None)),
        default=ratio
    )
def indicator_defined_targets_months():
    """annotates a queryset of indicators with the number of months their
    targets cover (number of targets * months in period) for time-aware
    target frequencies
    used by the program level get_defined_targets filter"""
    # One When per time-aware frequency, scaling the target count by the
    # number of months in that frequency's period.
    whens = [
        models.When(
            target_frequency=frequency,
            then=models.ExpressionWrapper(
                models.F('defined_targets') * month_count,
                output_field=models.IntegerField()))
        for frequency, month_count in utils.TIME_AWARE_FREQUENCIES
    ]
    return models.Case(
        *whens,
        default=models.Value(None, output_field=models.IntegerField(null=True)))
def get_context_data(self, **kwargs):
    """Build the client-list context: a paginated table annotated with
    each client's total revenue, plus creation URL and page titles."""
    context = super().get_context_data(**kwargs)
    # Total revenue per client: quantity * unit price summed over all
    # invoice lines.
    # BUGFIX: ExpressionWrapper now wraps the whole product, so its
    # FloatField output type applies to the mixed-type multiplication;
    # previously only the quantity factor was wrapped, leaving the
    # qty * price expression without a declared output type.
    clients = Client.objects.annotate(
        chiffre=models.Sum(
            models.ExpressionWrapper(
                models.F('factures__lignes__qte')
                * models.F('factures__lignes__produit__prix'),
                output_field=models.FloatField())))
    table = ClientTable(clients)
    RequestConfig(self.request, paginate={"per_page": 8}).configure(table)
    context['table'] = table
    # URL pointing at the creation view.
    context['creation_url'] = "/bill/client_table_create/"
    context['object'] = 'Client'
    context['title'] = 'La liste des clients :'
    return context
def group_the_query(self, qs, field_name, group_by_fields):
    """Group *qs* by the requested fields (plus any display dimensions
    present in self.data) and aggregate the standard ad metrics."""
    # Only these dimensions may be added from the request data.
    display_dims = list(set(self.data) & {'date', 'os', 'channel', 'country'})
    grouping = group_by_fields + display_dims
    metrics = dict(
        impressions=models.Sum('impressions'),
        clicks=models.Sum('clicks'),
        installs=models.Sum('installs'),
        spend=models.Sum('spend'),
        revenue=models.Sum('revenue'),
        # cost per install = spend / installs.
        cpi=models.ExpressionWrapper(
            models.F('spend') / models.F('installs'),
            output_field=models.FloatField()),
    )
    return qs.values(*grouping).annotate(**metrics).order_by(*grouping)
def _add_distance_old(qs, helprequest_coordinates, as_int=False):
    """
    This function is DEPRECATED and will be removed in future releases.
    Kept for compatibility with older system (and to keep new features for the upgraded one)

    Annotates each row with a rough Euclidean distance, divided by 100,
    between the row's city coordinates and ``helprequest_coordinates``.

    :param qs: queryset exposing ``city__x`` and ``city__y``
    :param helprequest_coordinates: pair where index 0 is x and index 1 is y
    :param as_int: when True the distance annotation is an IntegerField
    :return: the annotated queryset (adds y_distance, x_distance, distance)
    """
    qs = qs.annotate(y_distance=(F('city__y') - helprequest_coordinates[1]) ** 2)
    qs = qs.annotate(x_distance=(F('city__x') - helprequest_coordinates[0]) ** 2)
    qs = qs.annotate(distance=models.ExpressionWrapper(((F('x_distance') + F('y_distance')) ** 0.5) / 100,
                                                       output_field=models.IntegerField() if as_int else models.FloatField()))
    return qs
def get_visible_to_user(cls, user):
    """Queryset of objects *user* may view: their own objects, objects
    whose view policy matches the user's privileges, and group-shared
    objects — with the user's own objects ordered first."""
    policies = [cls.ViewPolicies.PUBLIC, cls.ViewPolicies.LOGGEDIN]
    if user.is_staff:
        policies.append(cls.ViewPolicies.STAFF)
    if user.is_superuser:
        policies.append(cls.ViewPolicies.SUPERUSER)
    visibility = (
        models.Q(owned_by=user)
        | models.Q(view_policy__in=policies)
        | models.Q(view_policy=cls.ViewPolicies.GROUP, view_group__user=user)
    )
    owner_flag = models.ExpressionWrapper(
        models.Q(owned_by__exact=user.pk),
        output_field=models.BooleanField(),
    )
    return (cls.objects.filter(visibility)
            .annotate(is_owner=owner_flag)
            .order_by("-is_owner", "slug")
            .distinct())
def users_stats(self, users):
    """Per-user totals and unread counts, restricted to *users* (an
    iterable of user ids or a user queryset)."""
    if isinstance(users, models.QuerySet):
        # Embed the queryset as a subquery instead of evaluating it.
        users = models.Subquery(users)
    unread_expr = models.ExpressionWrapper(
        models.F('count') - models.Sum('has_read'),
        output_field=models.IntegerField()
    )
    return (self.order_by('user')
            .values('user')
            .filter(user__in=users)
            .annotate(count=models.Count('id'))
            .annotate(unread=unread_expr))
def add_scope_annotations(self, qs):
    """Annotate *qs* with ``lop_met_real`` (actual/target ratio, NULL when
    either sum is missing) and ``over_under`` (-1/0/1 scope flag, with the
    sign flipped for indicators whose direction of change is negative)."""
    # set the margins for reporting as over or under scope:
    over_scope = 1 + Indicator.ONSCOPE_MARGIN
    under_scope = 1 - Indicator.ONSCOPE_MARGIN
    return qs.annotate(
        # first establish the real lop-to-date progress against targets:
        lop_met_real=models.Case(
            models.When(
                models.Q(lop_target_sum__isnull=True) |
                models.Q(lop_actual_sum__isnull=True),
                then=models.Value(None)
            ),
            default=models.ExpressionWrapper(
                models.F('lop_actual_sum') / models.F('lop_target_sum'),
                output_field=models.FloatField()
            )
        )
    ).annotate(
        # Case arms are evaluated in order, so the DOC-negative checks
        # must precede the plain over/under checks.
        over_under=models.Case(
            # None for indicators missing targets or data:
            models.When(
                lop_met_real__isnull=True,
                then=models.Value(None)
            ),
            models.When(
                # over is negative if DOC is Negative
                models.Q(lop_met_real__gt=over_scope) &
                models.Q(direction_of_change=Indicator.DIRECTION_OF_CHANGE_NEGATIVE),
                then=models.Value(-1)
            ),
            models.When(
                lop_met_real__gt=over_scope,
                then=models.Value(1)
            ),
            models.When(
                # under is positive if DOC is Negative:
                models.Q(lop_met_real__lt=under_scope) &
                models.Q(direction_of_change=Indicator.DIRECTION_OF_CHANGE_NEGATIVE),
                then=models.Value(1)
            ),
            models.When(
                lop_met_real__lt=under_scope,
                then=models.Value(-1)
            ),
            # within margin on either side counts as on-scope:
            default=models.Value(0),
            output_field=models.IntegerField(null=True)
        )
    )
def _add_distance(qs, helprequest_coordinates, as_int=False):
    """
    Uses Pythagoras to calculate distance roughly, until an exact algorithm
    will be implemented. THE EARTH IS NOT FLAT, Israel is simply a small country :)

    :param qs: queryset exposing ``location_latitude`` / ``location_longitude``
    :param helprequest_coordinates: pair where index 0 is latitude and
        index 1 is longitude
    :param as_int: when True the distance annotation is an IntegerField
    :return: the annotated queryset (adds latitude_distance,
        longitude_distance, distance)
    """
    qs = qs.annotate(latitude_distance=(F('location_latitude') - helprequest_coordinates[0]) ** 2)
    qs = qs.annotate(longitude_distance=(F('location_longitude') - helprequest_coordinates[1]) ** 2)
    qs = qs.annotate(
        distance=models.ExpressionWrapper(((F('latitude_distance') + F('longitude_distance')) ** 0.5) / 100,
                                          output_field=models.IntegerField() if as_int else models.FloatField()))
    return qs
def save(self, *args, **kwargs):
    """Mark the pipeline run completed once every stage run has finished
    and, unless disabled via settings, refresh task weights from the
    average runtime of recent runs."""
    # how many tasks need to be completed?
    required = self.pipeline.stages.all().count()
    completed = self.stage_runs.filter(completed_on__isnull=False).count()
    if required == completed:
        self.completed_on = datetime.datetime.now(tz=pytz.UTC)
        # did we complete successfully? if we're here and haven't been set to false yet,
        # then we can assume it's all good
        if self.success is None:
            self.success = True
        # if we were running (haven't failed critically), update to completed
        if self.status == 'Running':
            self.status = 'Completed'
        # do we want to auto-update weight? (defaults to on when the
        # setting is absent)
        if not hasattr(settings, 'DJANGO_COG_AUTO_WEIGHT'
                       ) or settings.DJANGO_COG_AUTO_WEIGHT:
            # pull the weight sample size
            sample_size = TASK_WEIGHT_SAMPLE_SIZE
            if hasattr(settings, 'DJANGO_COG_TASK_WEIGHT_SAMPLE_SIZE'):
                sample_size = settings.DJANGO_COG_TASK_WEIGHT_SAMPLE_SIZE
            # make a way to compute runtime from SQL:
            duration = models.ExpressionWrapper(
                models.F('completed_on') - models.F('started_on'),
                output_field=models.fields.DurationField())
            # if we're completed, let's update the weights of all our tasks
            for stage in self.pipeline.stages.all():
                # update each tasks in the stage
                for task in stage.assigned_tasks.all():
                    # do we have any runs that we can actually base an update off of?
                    if not task.runs.filter(task__enabled=True).exists():
                        continue
                    # get a sample of this tasks runs
                    # NOTE(review): if every sampled run lacks completed_on,
                    # Avg returns None and .total_seconds() raises — confirm
                    # runs are always stamped before this path executes.
                    average_weight = task.runs.filter(
                        task__enabled=True)[:sample_size].annotate(
                            runtime=duration).aggregate(
                                models.Avg('runtime')
                            )['runtime__avg'].total_seconds()
                    task.weight = average_weight
                    task.save()
    super(PipelineRun, self).save(*args, **kwargs)