def test_join_key_transform_annotation_expression(self):
    related_obj = RelatedJSONModel.objects.create(
        value={"d": ["f", "e"]},
        json_model=self.objs[4],
    )
    RelatedJSONModel.objects.create(
        value={"d": ["e", "f"]},
        json_model=self.objs[4],
    )
    self.assertSequenceEqual(
        RelatedJSONModel.objects.annotate(
            key=F("value__d"),
            related_key=F("json_model__value__d"),
            chain=F("key__1"),
            expr=Cast("key", models.JSONField()),
        ).filter(chain=F("related_key__0")),
        [related_obj],
    )

def filter(self, qs, value):
    if value in EMPTY_VALUES:
        return qs

    valid_lookups = qs.model._meta.get_field(self.field_name).get_lookups()

    try:
        value = json.loads(value)
    except json.decoder.JSONDecodeError:
        raise ValidationError("JSONValueFilter value needs to be json encoded.")

    for expr in value:
        if expr in EMPTY_VALUES:  # pragma: no cover
            continue
        if not all(("key" in expr, "value" in expr)):
            raise ValidationError(
                'JSONValueFilter value needs to have a "key" and "value" and an '
                'optional "lookup" key.'
            )
        lookup_expr = expr.get("lookup", self.lookup_expr)
        if lookup_expr not in valid_lookups:
            raise ValidationError(
                f'Lookup expression "{lookup_expr}" not allowed for field '
                f'"{self.field_name}". Valid expressions: '
                f'{", ".join(valid_lookups.keys())}'
            )
        # "contains" behaves differently on JSONFields than it does on
        # TextFields. That's why we annotate the queryset with the value.
        # Some discussion about it can be found here:
        # https://code.djangoproject.com/ticket/26511
        if isinstance(expr["value"], str):
            qs = qs.annotate(
                field_val=Cast(
                    KeyTextTransform(expr["key"], self.field_name), CharField()
                )
            )
            lookup = {f"field_val__{lookup_expr}": expr["value"]}
        else:
            lookup = {
                f"{self.field_name}__{expr['key']}__{lookup_expr}": expr["value"]
            }
        qs = qs.filter(**lookup)
    return qs

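# A minimal sketch of the string branch above, under assumed names (a
# hypothetical Document model with a JSONField called "metadata"; the import
# path is Django >= 3.1, older versions import KeyTextTransform from
# django.contrib.postgres.fields.jsonb). Casting the key to text first makes
# icontains behave as it would on a TextField instead of doing JSON
# containment.
from django.db.models import CharField
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast

matches = Document.objects.annotate(
    field_val=Cast(KeyTextTransform("status", "metadata"), CharField())
).filter(field_val__icontains="activ")
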
def monitor_ripple_to_dash_transaction(transaction_id):
    logger.info('Withdrawal {}. Monitoring'.format(transaction_id))
    transaction = models.WithdrawalTransaction.objects.get(id=transaction_id)
    ripple_gateway_address = models.RippleWalletCredentials.get_solo().address
    ripple_transactions_balance = RippleTransaction.objects.filter(
        destination_tag=transaction.destination_tag,
        currency='DSH',
        issuer=ripple_gateway_address,
        status=RippleTransaction.RECEIVED,
    ).annotate(
        value_decimal=Cast(
            'value',
            DecimalField(max_digits=182, decimal_places=96),
        ),
    ).aggregate(Sum('value_decimal'))['value_decimal__sum']
    if ripple_transactions_balance is not None:
        ripple_transactions_balance = ripple_transactions_balance.normalize()
        logger.info(
            'Withdrawal {}. Received {} of {} DSH'.format(
                transaction_id,
                ripple_transactions_balance,
                transaction.get_normalized_dash_to_transfer(),
            ),
        )
        if ripple_transactions_balance >= transaction.dash_to_transfer:
            transaction.state = transaction.CONFIRMED
            transaction.save(update_fields=('state',))
            send_dash_transaction.delay(transaction_id)
            return
    else:
        logger.info(
            'Withdrawal {}. No transaction found yet'.format(transaction_id),
        )
    expiration_minutes = (
        models.GatewaySettings.get_solo().transaction_expiration_minutes)
    if transaction.timestamp + timedelta(minutes=expiration_minutes) < now():
        transaction.state = transaction.OVERDUE
        transaction.save(update_fields=('state',))
        logger.info('Withdrawal {}. Became overdue'.format(transaction_id))
    else:
        raise monitor_ripple_to_dash_transaction.retry(
            (transaction_id,), countdown=60,
        )

def posts(req):
    POSTS_PER_FETCH = 30
    newer_than = try_str_to_int(req.GET.get('newer_than', None))
    older_than = try_str_to_int(req.GET.get('older_than', None))
    only_user_posts = req.GET.get('pid', None)

    if newer_than:
        query = Q(pk__gt=newer_than)
        prefix = ''
    elif older_than:
        query = Q(pk__lt=older_than)
        prefix = '-'
    else:
        query = Q()
        prefix = '-'

    if only_user_posts:
        pid = try_str_to_int(only_user_posts)
        profile = get_object_or_404(DP, pk=pid)
        posts = (
            Post.objects
            .filter(profile=profile, is_deleted=False)
            .select_related('profile')
            .prefetch_related('photos')
        )
    else:
        posts = req.user.profile.matching_posts

    ordering = [prefix + 'created_date', '-is_user_vip', prefix + 'created']
    posts = (
        posts
        .filter(query)
        .annotate(created_date=Cast('created', DateField()))
        .annotate(is_user_vip=DP.is_vip_annotation('profile__status'))
        .order_by(*ordering)
        [:POSTS_PER_FETCH]
    )
    return JsonResponse({'posts': serialize_posts(posts)})

@classmethod
def measurements_for_forecast(cls, measurements, aq_attributes, start_date, end_date):
    from .constants import DATETIME_FORMAT, POLLUTANT_TO_MONITOR
    from forecasting.utils import get_datetime_span_dict

    dates = get_datetime_span_dict(start_date, end_date)
    data = {'date': []}
    averages = {}
    measurements = measurements.filter(date_time_start__gte=start_date,
                                       date_time_start__lte=end_date)

    # Get the average for each aq_attr to mend missing pieces
    # and create the data columns
    for attr in aq_attributes:
        avg = measurements.filter(monitor_id=attr).annotate(
            value_float=Cast('value', models.FloatField())).aggregate(
            Avg('value_float'))
        averages[attr] = round(avg['value_float__avg'], 2)
        data[POLLUTANT_TO_MONITOR[attr]] = []

    # Bucket each measurement under its timestamp
    for m in measurements:
        date_str = timezone.localtime(
            m.date_time_start).strftime(DATETIME_FORMAT)
        dates[date_str].append((m.monitor.monitor_id, m.value))

    for date, monitors in dates.items():
        data['date'].append(date)
        aq_attr_checklist = copy.copy(aq_attributes)
        for monitor in monitors:
            aq_attr_checklist.remove(monitor[0])
            moni = POLLUTANT_TO_MONITOR[monitor[0]]
            data[moni].append(monitor[1])
        # Any monitor without a reading at this timestamp gets its average
        for attr in aq_attr_checklist:
            moni = POLLUTANT_TO_MONITOR[attr]
            data[moni].append(averages[attr])

    return data

def fill_month(site, month_for, student_modules=None, overwrite=False, use_raw=False):
    """Fill a month's site monthly metrics for the specified site
    """
    if not student_modules:
        student_modules = get_student_modules_for_site(site)

    if student_modules:
        if not use_raw:
            month_sm = student_modules.filter(modified__year=month_for.year,
                                              modified__month=month_for.month)
            mau_count = month_sm.values_list('student_id',
                                             flat=True).distinct().count()
        else:
            if RELEASE_LINE == 'ginkgo':
                site_ids = tuple([
                    int(sid) for sid in student_modules.values_list(
                        'id', flat=True).distinct()
                ])
            else:
                # make sure we get integers and not longints from db
                from django.db.models.functions import Cast
                site_ids = tuple(
                    student_modules.annotate(
                        id_as_int=Cast('id', IntegerField())).values_list(
                        'id_as_int', flat=True).distinct())

            statement = _get_fill_month_raw_sql_for_month(site_ids, month_for)
            with connection.cursor() as cursor:
                cursor.execute(statement)
                row = cursor.fetchone()
                mau_count = row[0]
    else:
        mau_count = 0

    obj, created = SiteMonthlyMetrics.add_month(site=site,
                                                year=month_for.year,
                                                month=month_for.month,
                                                active_user_count=mau_count,
                                                overwrite=overwrite)
    return obj, created

def build_totals(self, response: List[dict]) -> dict:
    # Need to use Postgres in this case since we only look at the first 10k
    # results for Elasticsearch. Since the endpoint is performing aggregations
    # on the entire matview with no grouping or joins, the query takes
    # minimal time to complete.
    if self.agg_key == settings.ES_ROUTING_FIELD:
        annotations = {"cast_def_codes": Cast("def_codes", ArrayField(TextField()))}
        filters = [
            Q(cast_def_codes__overlap=self.def_codes),
            self.has_award_of_provided_type(should_join_awards=False),
        ]
        aggregations = {
            "face_value_of_loan": Sum("total_loan_value"),
            "obligation": Sum("obligation"),
            "outlay": Sum("outlay"),
        }
        aggregations = {col: aggregations[col] for col in self.sum_column_mapping.keys()}
        aggregations["award_count"] = Count("award_id")
        if self.filters.get("query"):
            filters.append(Q(recipient_name__icontains=self.filters["query"]["text"]))
        totals = (
            CovidFinancialAccountMatview.objects.annotate(**annotations)
            .filter(*filters)
            .values()
            .aggregate(**aggregations)
        )
        return totals

    totals = {key: 0 for key in self.sum_column_mapping.keys()}
    award_count = 0
    for bucket in response:
        for key in totals.keys():
            totals[key] += get_summed_value_as_float(bucket, self.sum_column_mapping[key])
        award_count += int(bucket.get("doc_count", 0))
    totals["award_count"] = award_count
    return totals

def get(self, request, username):
    user = User.objects.filter(username=username).first()
    # '손님' is Korean for "guest"; guest accounts are excluded from the stats.
    user_exclude_guest = User.objects.filter(delYn=False).exclude(
        username__startswith='손님').order_by('username')
    honors = Honor.objects.all()

    users_honor = user_exclude_guest.annotate(
        tot_count=Subquery(
            honors.values('user').annotate(tot_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('tot_count')[:1]),
        king_count=Subquery(
            honors.filter(position=1).values('user')
            .annotate(king_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('king_count')[:1]),
        slave_count=Subquery(
            honors.filter(gamerTotCnt=F("position")).values('user')
            .annotate(slave_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('slave_count')[:1]),
        king_per=Cast(F("king_count"), FloatField()) * 100 / Cast(F("tot_count"), FloatField()),
        slave_per=Cast(F("slave_count"), FloatField()) * 100 / Cast(F("tot_count"), FloatField()),
    )
    users_pick_honor = user_exclude_guest.annotate(
        tot_count=Subquery(
            honors.filter(round=0).values('user')
            .annotate(tot_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('tot_count')[:1]),
        king_count=Subquery(
            honors.filter(round=0, position=1).values('user')
            .annotate(king_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('king_count')[:1]),
        slave_count=Subquery(
            honors.filter(round=0, gamerTotCnt=F("position")).values('user')
            .annotate(slave_count=Count('user'))
            .filter(user_id=OuterRef('id')).values('slave_count')[:1]),
        king_per=Cast(F("king_count"), FloatField()) * 100 / Cast(F("tot_count"), FloatField()),
        slave_per=Cast(F("slave_count"), FloatField()) * 100 / Cast(F("tot_count"), FloatField()),
    )
    context = {
        'user': user,
        'honor_by_king': users_honor.order_by('-king_per'),
        'honor_by_slave': users_honor.order_by('-slave_per'),
        'pick_by_king': users_pick_honor.order_by('-king_per'),
        'pick_by_slave': users_pick_honor.order_by('-slave_per'),
    }
    return render(request, self.template_name, context)

def progress(request):
    start_of_week = datetime.datetime.today() - datetime.timedelta(
        days=datetime.datetime.today().isoweekday() % 7)
    tasks_left_this_week = Task.objects.annotate(
        date_only=Cast('end_time', DateField())).filter(
            user=request.user.id,
            completed=False,
            date__gte=start_of_week,
            end_time__lte=start_of_week + datetime.timedelta(days=6))
    estimated_hours = tasks_left_this_week.aggregate(
        total=Sum('hours'))['total']
    estimated_minutes = tasks_left_this_week.aggregate(
        total=Sum('minutes'))['total']
    if estimated_minutes and estimated_hours:
        # Carry whole hours out of the minute total
        estimated_hours += estimated_minutes // 60
        estimated_minutes %= 60
    else:
        estimated_hours = 0
        estimated_minutes = 0
    weekly = Task.objects.filter(completed=True).annotate(
        week=Extract('date_completed', 'week'),
        year=Extract('date_completed', 'year')).values('year', 'week').annotate(
            per_week=Count('week')).aggregate(avg=Avg('per_week'))
    tasks_done_this_week = Task.objects.filter(
        user=request.user.id,
        completed=True,
        date_completed__gte=start_of_week,
        date_completed__lte=start_of_week + datetime.timedelta(days=6)).count()
    if weekly['avg'] < tasks_done_this_week:
        emoji = ':('
    elif weekly['avg'] == tasks_done_this_week:
        emoji = ': |'
    else:
        emoji = ':)'
    context = {
        'state': emoji,
        'tasks_left': tasks_left_this_week.count(),
        'est_hours': estimated_hours,
        'est_minutes': estimated_minutes,
    }
    return context

def get(self, request, *args, **kwargs):
    num_days = int(request.GET.get('days', 10))
    offset = int(request.GET.get('offset', 0))
    show_only = []
    for status in ['confirmed', 'unconfirmed', 'cancelled']:
        if int(request.GET.get(status, 1)):
            show_only.append(status)
    now = timezone.now() + timedelta(days=offset * num_days)
    dates = [now.date() + timedelta(days=n) for n in range(num_days)]
    appointments = (Appointment.objects.annotate(
        date=Cast('appointment_date', DateField())).filter(
            date__range=(dates[0], dates[-1]),
            appointment_confirm_status__in=show_only).order_by('date'))
    return Response({
        'appointments': [a.as_row() for a in appointments],
        'range_str': "{} - {}".format(dates[0].strftime('%B %d'),
                                      dates[-1].strftime('%B %d, %Y')),
    })

def changes(request):
    style = request.GET.get('style')  # all_docs
    since = int(request.GET.get('since', '0'))
    limit = int(request.GET.get('limit', '1000'))  # TODO: make default max limit configurable
    feed = request.GET.get('feed', 'normal')  # (continuous, normal, longpoll)
    # TODO: filter
    results = []  # TODO: stream
    last_change = 0
    for change in Change.objects.filter(
            pk__gt=since).values('document_id').annotate(
            id=Max('pk'), deleted=Max('deleted')).order_by('id')[:limit]:
        # TODO: instead of querying, use ArrayAgg for revisions if db is postgres
        change_id = change['id']
        revisions = Change.objects.filter(
            pk__gt=since, document_id=change['document_id']).annotate(
            rev=Concat(Value('1-'), Cast('revision', CharField()))).values(
            'rev').order_by('pk')
        row = {
            "seq": change_id,
            "id": change['document_id'],
            "changes": revisions[::-1],
        }
        if change["deleted"] == 1:
            row["deleted"] = True
        results.append(row)
        last_change = change_id
    if feed == 'normal':
        return JsonResponse({
            "results": results,
            "last_seq": str(last_change if last_change > 0
                            else Change.objects.latest('id').id),
        })
    else:
        return HttpResponseBadRequest(
            '{"error": "sync style not implemented"}',
            content_type='application/json')

def get_next_id(queryset: QuerySet, id_field: Field, max_len: int):
    """
    Fetch the next sequential ID value by incrementing the maximum ID value
    in a queryset.

    :param queryset QuerySet: The queryset to get the next sequential ID from
    :param id_field Field: The ID field to consider
    :param max_len int: The maximum length of an ID value
    """
    if not queryset:
        return "1".zfill(max_len)
    return (
        queryset.annotate(
            next_id=Func(
                Cast(F(id_field.name), IntegerField()) + 1,
                Value(f"FM{'0' * max_len}"),
                function="TO_CHAR",
                output_field=id_field,
            ),
        )
        .exclude(next_id__in=queryset.values(id_field.name))
        .order_by(id_field.name)
        .first()
        .next_id
    )

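# A minimal usage sketch for get_next_id, under assumed names (a hypothetical
# Invoice model with a zero-padded CharField "reference": "0001", "0002", ...).
# TO_CHAR with an FM format mask is Postgres-specific, so this helper only
# works on that backend.
next_ref = get_next_id(
    Invoice.objects.all(), Invoice._meta.get_field("reference"), max_len=4)
# -> "0003" when "0001" and "0002" are already taken
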
def get_dashboard_cas_usage_counts(self, start_date, end_date, domain):
    """
    :param start_date: start date of the filter
    :param end_date: end date of the filter
    :param domain:
    :return: returns the counts of downloads of each report, and of total
        reports, for all usernames
    """
    print('Compiling cas export usage counts for users')
    cas_user_counts = defaultdict(int)
    records = list(
        ICDSAuditEntryRecord.objects.filter(
            url=f'/a/{domain}/cas_export',
            time_of_use__gte=start_date,
            time_of_use__lt=end_date)
        .annotate(indicator=Cast(KeyTextTransform('indicator', 'post_data'),
                                 TextField()))
        .values('indicator')
        .annotate(count=Count('indicator'))
        .values('username', 'count')
        .order_by('username'))
    for record in records:
        cas_user_counts[record['username'].split('@')[0]] += record['count']
    return cas_user_counts

def as_expression(self, bare_lookup, fallback=True):
    """
    Compose an expression to get the value for this virtual field in a query.
    """
    language = self.get_language()

    if language == DEFAULT_LANGUAGE:
        return F(self._localized_lookup(language, bare_lookup))

    if not fallback:
        i18n_lookup = self._localized_lookup(language, bare_lookup)
        return Cast(i18n_lookup, self.output_field())

    fallback_chain = get_fallback_chain(language)
    # first, add the current language to the list of lookups
    lookups = [self._localized_lookup(language, bare_lookup)]

    # and now, add the list of fallback languages to the lookup list
    for fallback_language in fallback_chain:
        lookups.append(self._localized_lookup(fallback_language, bare_lookup))

    return Coalesce(*lookups, output_field=self.output_field())

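# A rough sketch of what the composed expression resolves to, assuming
# django-modeltrans-style storage (translations in an "i18n" JSON column), a
# hypothetical Blog model, active language "nl" and fallback chain ("en",):
# the current language's value wins, then each fallback in order.
from django.db.models import F, TextField
from django.db.models.functions import Coalesce

Blog.objects.order_by(
    Coalesce(F("i18n__title_nl"), F("i18n__title_en"),
             output_field=TextField()).asc())
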
def get_queryset(self):
    qs = Well.objects.all() \
        .select_related('intended_water_use', 'aquifer', 'aquifer__material',
                        'aquifer__subtype') \
        .prefetch_related('screen_set')

    if not self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
        qs = qs.exclude(well_publication_status='Unpublished')

    # check if a point was supplied (note: actual filtering will be by
    # the filter_backends classes). If so, add distances from the point.
    point = self.request.query_params.get('point', None)
    srid = self.request.query_params.get('srid', 4326)
    radius = self.request.query_params.get('radius', None)
    if point and radius:
        try:
            shape = GEOSGeometry(point, srid=int(srid))
            radius = float(radius)
            assert shape.geom_type == 'Point'
        except (ValueError, AssertionError, GDALException, GEOSException):
            raise ValidationError({
                'point': 'Invalid point geometry. Use geojson geometry or WKT. '
                         'Example: {"type": "Point", "coordinates": [-123,49]}'
            })
        else:
            qs = qs.annotate(distance=Cast(
                Distance('geom', shape),
                output_field=FloatField())).order_by('distance')

    # can also supply a comma separated list of wells
    wells = self.request.query_params.get('wells', None)
    if wells:
        wells = wells.split(',')
        for w in wells:
            if not w.isnumeric():
                raise ValidationError(detail='Invalid well')
        wells = map(int, wells)
        qs = qs.filter(well_tag_number__in=wells)

    return qs

def test_function(self):
    if settings.BACKEND == 'mysql':
        # Explicit cast for MySQL with Coalesce and Datetime
        # https://docs.djangoproject.com/en/2.1/ref/models/database-functions/#coalesce
        annotation = {
            'oldest_child_with_other': Cast(
                SubqueryMin(Coalesce('child__other_timestamp', 'child__timestamp'),
                            output_field=DateTimeField()),
                DateTimeField())
        }
    else:
        annotation = {
            'oldest_child_with_other': SubqueryMin(
                Coalesce('child__other_timestamp', 'child__timestamp'),
                output_field=DateTimeField())
        }
    parents = Parent.objects.filter(name='John').annotate(**annotation)
    oldest_child = Child.objects.filter(parent__name='John').order_by(
        Coalesce('other_timestamp', 'timestamp').asc())[0]
    self.assertEqual(parents[0].oldest_child_with_other,
                     oldest_child.other_timestamp or oldest_child.timestamp)

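# Context for the MySQL branch above: per the linked Django docs, a datetime
# value passed through Coalesce on MySQL may come back with the wrong type
# unless explicitly cast, hence the extra Cast(..., DateTimeField()) wrapper.
# A standalone sketch of the same pattern, assuming SubqueryMin comes from
# the django-sql-utils package:
from django.db.models import DateTimeField
from django.db.models.functions import Cast, Coalesce
from sql_util.utils import SubqueryMin

Parent.objects.annotate(oldest_child_with_other=Cast(
    SubqueryMin(Coalesce('child__other_timestamp', 'child__timestamp'),
                output_field=DateTimeField()),
    DateTimeField()))
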
def annotations(self):
    maxdate = datetime.date.today().replace(day=1)
    mindate = maxdate - relativedelta(years=1)
    return {
        'num_transactions': Count('transaction'),
        'year_transactions': Count(
            'transaction',
            filter=Q(transaction__date__gte=mindate,
                     transaction__date__lt=maxdate)),
        'year_total': Sum(
            'transaction__amount',
            filter=Q(transaction__date__gte=mindate,
                     transaction__date__lt=maxdate)),
        'year_average': Cast(
            F('year_total') / 12,
            DecimalField(max_digits=9, decimal_places=2)),
    }

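# A hypothetical usage sketch: the dict is meant to be splatted into
# annotate() on a queryset whose model has the "transaction" reverse relation
# (an Account model and a "report" instance are assumed here). year_average
# is simply year_total / 12, cast so the division yields a two-decimal value.
accounts = Account.objects.annotate(**report.annotations())
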
def write(self, src, ip, storage_method, storage_medium,
          block_size=DEFAULT_TAPE_BLOCK_SIZE):
    block_size = storage_medium.block_size * 512

    last_written_obj = StorageObject.objects.filter(
        storage_medium=storage_medium).annotate(
        content_location_value_int=Cast('content_location_value',
                                        IntegerField())).order_by(
        'content_location_value_int').only('content_location_value').last()

    if last_written_obj is None:
        tape_pos = 1
    else:
        tape_pos = last_written_obj.content_location_value_int + 1

    try:
        drive = TapeDrive.objects.get(storage_medium=storage_medium)
    except TapeDrive.DoesNotExist:
        raise ValueError("Tape not mounted")

    try:
        set_tape_file_number(drive.device, tape_pos)
        write_to_tape(drive.device, src, block_size=block_size)
    except OSError as e:
        if e.errno == errno.ENOSPC:
            storage_medium.mark_as_full()
        else:
            raise

    drive.last_change = timezone.now()
    drive.save(update_fields=['last_change'])

    return StorageObject.objects.create(
        content_location_value=tape_pos,
        content_location_type=TAPE,
        ip=ip,
        storage_medium=storage_medium,
        container=storage_method.containers)

def apply_sorting(self, order_lib):
    '''
    Applies ordering on the given columns and directions; the order of
    importance is determined by the number after the 'order' flag.

    :param order_lib: the columns to sort
    :return: changes self.data
    '''
    order_lib = sorted([(int(k), i) for k, i in order_lib.items()])
    order_list = []
    for key, value in order_lib:
        sort_string = self.columns[value['column']]['data']
        sort_list = []
        if sort_string in ["id_collectie"]:
            # Split a mixed identifier like "AB123" into a letter part and a
            # numeric part so it sorts naturally, not lexicographically.
            str_name = sort_string + '_str'
            int_name = sort_string + '_int'
            self.data = self.data.annotate(**{
                str_name: Func(
                    F(sort_string),
                    Value('[^A-Z]'), Value(''), Value('g'),
                    function='regexp_replace')
            })
            self.data = self.data.annotate(**{
                int_name: Cast(Func(
                    F(sort_string),
                    Value(r'[^\d]'), Value(''), Value('g'),
                    function='regexp_replace'
                ), IntegerField())
            })
            sort_list.append(str_name)
            sort_list.append(int_name)
        else:
            sort_list.append(sort_string)
        if value['dir'] == 'desc':
            for item in sort_list:
                order_list.append("-" + item)
        else:
            order_list += sort_list
    self.data = self.data.order_by(*order_list)

def process_math(query: QuerySet, entity: Entity) -> QuerySet:
    math_to_aggregate_function = {"sum": Sum, "avg": Avg, "min": Min, "max": Max}

    if entity.math == "dau":
        # In daily active users mode count only up to 1 event per user per day
        query = query.annotate(count=Count("person_id", distinct=True))
    elif entity.math in math_to_aggregate_function:
        # Run relevant aggregate function on specified event property,
        # casting it to a double
        query = query.annotate(
            count=math_to_aggregate_function[entity.math](
                Cast(
                    RawSQL('"posthog_event"."properties"->>%s', (entity.math_property,)),
                    output_field=FloatField(),
                )
            )
        )
        # Skip over events where the specified property is not set or not a number.
        # It may not be ideally clear to the user what events were skipped,
        # but in the absence of typing, this is safe, cheap, and frictionless
        query = query.extra(
            where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
            params=[entity.math_property],
        )
    return query

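# A minimal standalone sketch of the aggregate branch, assuming PostHog's
# Event model and a numeric "price" property (Postgres-only: the ->> operator
# and jsonb_typeof):
from django.db.models import FloatField, Sum
from django.db.models.expressions import RawSQL
from django.db.models.functions import Cast

total = Event.objects.extra(
    where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
    params=["price"],
).aggregate(total=Sum(Cast(
    RawSQL('"posthog_event"."properties"->>%s', ("price",)),
    output_field=FloatField())))["total"]
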
def check_if_inventory_linked(instance, action, **kwargs):
    if 'loaddata' in sys.argv or kwargs.get('raw', False):  # nocv
        return
    if action != "pre_remove":
        return
    removing_inventories = instance.inventories.filter(pk__in=kwargs['pk_set'])
    check_id = removing_inventories.values_list('id', flat=True)
    linked_templates = Template.objects.filter(
        inventory__iregex=r'^[0-9]{1,128}$'
    ).annotate(
        inventory__id=Cast('inventory', IntegerField())
    ).filter(inventory__id__in=check_id)
    linked_periodic_tasks = PeriodicTask.objects.filter(_inventory__in=check_id)
    if linked_periodic_tasks.exists() or linked_templates.exists():
        raise_linked_error(
            linked_templates=list(linked_templates.values_list('id', flat=True)),
            linked_periodic_tasks=list(
                linked_periodic_tasks.values_list('id', flat=True)),
        )

@classmethod
def annotate_balance(cls, users=None, associations=None):
    """
    Returns a queryset of all users or associations annotated with their
    respective credits.

    :param users: A queryset of users to annotate; defaults to all users if
        none is given
    :param associations: A queryset of associations to annotate
    :return: The queryset annotated with 'balance'
    """
    if users is not None and associations is not None:
        raise ValueError(
            "Either users or associations need to have a value, not both")

    # Set the query result
    if associations:
        result = associations
        q_type = "associations"
    else:
        # If users is None, the result becomes a list of all users
        # automatically (set in query retrieval)
        result = users
        q_type = "users"

    # Get all child classes
    children = cls.get_children()

    # Loop over all children, get their respective transaction queries,
    # union the transaction queries
    for child in children:
        result = child.annotate_balance(**{q_type: result})

    # Get the annotated name values of its immediate children
    sum_query = None
    for child in children:
        # If sum_query is not yet defined, define it; otherwise add to it
        if sum_query:
            sum_query += F(child.balance_annotation_name)
        else:
            sum_query = F(child.balance_annotation_name)

    from django.db.models.functions import Cast
    sum_query = Cast(sum_query, models.FloatField())

    # annotate the results of the children in a single variable name
    result = result.annotate(**{cls.balance_annotation_name: sum_query})

    return result

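# A minimal usage sketch, assuming a hypothetical AbstractTransaction root
# class that defines balance_annotation_name = "balance":
users_with_balance = AbstractTransaction.annotate_balance(
    users=User.objects.all())
for u in users_with_balance.order_by("-balance"):
    print(u.username, u.balance)
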
def get(self, request, num_page=1):
    response = {"form": CommentForm()}
    if request.user.is_authenticated:
        sub_query_1 = BookLike.objects.filter(
            user=request.user, book=OuterRef('pk')).values('rate')
        sub_query_2 = Exists(User.objects.filter(id=request.user.id, book=OuterRef('pk')))
        sub_query_3 = Exists(User.objects.filter(id=request.user.id, comment=OuterRef('pk')))
        sub_query_4 = Exists(User.objects.filter(id=request.user.id, like=OuterRef('pk')))
        comment = Comment.objects.annotate(
            is_owner=sub_query_3, is_liked=sub_query_4
        ).select_related('user').prefetch_related('like')
        comment_prefetch = Prefetch('comment', comment)
        result = Book.objects.annotate(
            user_rate=Cast(sub_query_1, CharField()), is_owner=sub_query_2
        ).prefetch_related(comment_prefetch, "author", "genre", "rate")
    else:
        result = Book.objects.prefetch_related(
            "author", "genre", "comment", "comment__user").all()
    pag = Paginator(result, 5)
    response['content'] = pag.page(num_page)
    response['count_page'] = list(range(1, pag.num_pages + 1))
    response['book_form'] = BookForm()
    return render(request, "index.html", response)

def getPendencies(request):
    response = ""
    json_r = {}

    if request.method == "POST":
        json_data = json.loads(request.body.decode("utf-8"))

        try:
            username = json_data["email"]
            subject = json_data["subject_slug"]

            if username is not None and subject is not None:
                notifications = (Notification.objects.filter(
                    user__email=username,
                    task__resource__topic__subject__slug=subject,
                ).annotate(str_date=Cast("creation_date", TextField()))
                 .values("str_date")
                 .order_by("-str_date")
                 .annotate(total=Count("str_date")))

                json_r["data"] = list(notifications)
                json_r["message"] = ""
                json_r["type"] = ""
                json_r["title"] = ""
                json_r["success"] = True
                json_r["number"] = 1
                json_r["extra"] = 0

                response = json.dumps(json_r)

                subject = Subject.objects.get(slug=subject)

                request.log_context = {}
                request.log_context["subject_id"] = subject.id
                request.log_context["subject_slug"] = subject.slug
                request.log_context["subject_name"] = subject.name
        except KeyError:
            response = "Error"

    return HttpResponse(response)

def inicio(request):
    '''Home page'''
    if request.user.is_authenticated:
        registro_horas = RegistroHora.objects.filter(
            empleado__usuario__username=request.user).values(
            'contrato__nombre', 'nombre', 'detalle', 'fecha',
            horas=Cast('horas_trabajadas', TimeField()))

        # Last 5 logged tasks
        tareas_realizadas = registro_horas.values(
            'contrato__nombre', 'nombre', 'detalle',
            'fecha').order_by('fecha')[:5]

        # Hours logged per project for the month
        horas_proyectos = registro_horas.values(
            'contrato__nombre').annotate(Sum('horas'))

        # Projects assigned in the month (active)
        proyectos_asignados = registro_horas.values(
            'contrato__nombre').distinct().count()

        # Total monthly hours
        total_horas_mensuales = registro_horas.aggregate(Sum('horas'))
        total_horas_mensuales = total_horas_mensuales['horas__sum']
        total_horas_mensuales = (
            int(total_horas_mensuales.days * 24
                + total_horas_mensuales.seconds / 3600)
            if total_horas_mensuales is not None else 0)

        # Total tasks completed in the month
        total_tareas_mensuales = registro_horas.values(
            'nombre').distinct().count()

        context = {
            'proyectos_asignados': proyectos_asignados,
            'total_horas_mensuales': total_horas_mensuales,
            'total_tareas_mensuales': total_tareas_mensuales,
            'tareas_realizadas': tareas_realizadas,
            'horas_proyectos': horas_proyectos,
        }
        return render(request, 'index.html', context)
    else:
        return redirect('login')

def search_properties(search_str, permit_requests_qs, limit=None):
    qs = (
        add_score(
            models.WorksObjectPropertyValue.objects.filter(
                property__input_type=models.WorksObjectProperty.INPUT_TYPE_TEXT,
                works_object_type_choice__permit_request__in=permit_requests_qs,
            ).annotate(
                txt_value=Cast(KeyTextTransform("val", "value"),
                               output_field=TextField()),
            ),
            ["txt_value"],
            search_str,
        )
        .annotate(author_full_name=Concat(
            "works_object_type_choice__permit_request__author__user__first_name",
            Value(" "),
            "works_object_type_choice__permit_request__author__user__last_name",
        ))
        .values(
            "works_object_type_choice__permit_request_id",
            "works_object_type_choice__permit_request__status",
            "works_object_type_choice__permit_request__created_at",
            "author_full_name",
            "property__name",
            "txt_value",
            "score",
        )
        .order_by("-score", "-works_object_type_choice__permit_request__created_at")
    )

    if limit is not None:
        qs = qs[:limit]

    return [
        SearchResult(
            permit_request_id=result["works_object_type_choice__permit_request_id"],
            permit_request_status=result["works_object_type_choice__permit_request__status"],
            permit_request_created_at=result["works_object_type_choice__permit_request__created_at"],
            author_name=result["author_full_name"],
            field_label=result["property__name"],
            field_value=result["txt_value"],
            score=result["score"],
            match_type=MatchType.PROPERTY,
        )
        for result in qs
    ]

def test_string_agg_array_agg_ordering_in_subquery(self):
    stats = []
    for i, agg in enumerate(AggregateTestModel.objects.order_by("char_field")):
        stats.append(StatTestModel(related_field=agg, int1=i, int2=i + 1))
        stats.append(StatTestModel(related_field=agg, int1=i + 1, int2=i))
    StatTestModel.objects.bulk_create(stats)

    for aggregate, expected_result in (
        (
            ArrayAgg("stattestmodel__int1", ordering="-stattestmodel__int2"),
            [
                ("Foo1", [0, 1]),
                ("Foo2", [1, 2]),
                ("Foo3", [2, 3]),
                ("Foo4", [3, 4]),
            ],
        ),
        (
            StringAgg(
                Cast("stattestmodel__int1", CharField()),
                delimiter=";",
                ordering="-stattestmodel__int2",
            ),
            [("Foo1", "0;1"), ("Foo2", "1;2"), ("Foo3", "2;3"), ("Foo4", "3;4")],
        ),
    ):
        with self.subTest(aggregate=aggregate.__class__.__name__):
            subquery = (
                AggregateTestModel.objects.filter(
                    pk=OuterRef("pk"),
                )
                .annotate(agg=aggregate)
                .values("agg")
            )
            values = (
                AggregateTestModel.objects.annotate(
                    agg=Subquery(subquery),
                )
                .order_by("char_field")
                .values_list("char_field", "agg")
            )
            self.assertEqual(list(values), expected_result)

def get_objects_for_group_roles(group, permission_name, qs, accept_global_perms=True):
    if "." in permission_name:
        app_label, codename = permission_name.split(".", maxsplit=1)
        permission = Permission.objects.get(
            content_type__app_label=app_label, codename=codename)
    else:
        permission = Permission.objects.get(codename=permission_name)

    # A role bound to no particular object (object_id=None) grants the
    # permission globally, so the queryset passes through unfiltered.
    if (accept_global_perms and group.object_roles.filter(
            object_id=None, role__permissions=permission).exists()):
        return qs

    group_role_pks = group.object_roles.filter(
        role__permissions=permission).values_list("object_id", flat=True)
    # object_id is stored as text, so cast the pk for comparison.
    return qs.annotate(pk_str=Cast("pk", output_field=CharField())).filter(
        pk_str__in=group_role_pks)

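# A minimal usage sketch under assumed names (an "editors" group, a
# hypothetical Article model, and its "articles.change_article" permission):
from django.contrib.auth.models import Group

visible = get_objects_for_group_roles(
    Group.objects.get(name="editors"),
    "articles.change_article",
    Article.objects.all(),
)
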
def get(self, request, *args, **kwargs):
    dis = Discussion.objects.get(pk=request.query_params.get('discussion'))
    users = CustomUser.objects.filter(
        answers__question__discussion=dis).annotate(
            age=Cast(Now() - F('birthday'), output_field=IntegerField()))
    data = list(
        users.values('age').annotate(Count('age')).values('age', 'age__count'))
    return Response({
        'labels': [age['age'] for age in data],
        'datasets': [{
            'data': [age['age__count'] for age in data],
            'backgroundColor': [
                'rgba(54, 162, 235, 0.2)',
                'rgba(255, 99, 132, 0.2)',
            ],
            'label': 'Возраст',  # Russian for "Age"
        }]
    })

def annotate_notifications(self, user=None, unread=True):
    """
    Annotate each room with the number of (un)read notifications that
    target it for the given user.

    :param user: recipient whose notifications are counted (optional, default None)
    :param unread: count unread (True) or read (False) notifications (optional, default True)
    :return: the annotated queryset, or self unchanged when no user is given
    """
    if user is not None:
        room = self.annotate(notifications_count=Count(
            Case(
                When(
                    Q(users__notifications__unread=unread)
                    & Q(users__notifications__recipient_id=str(user))
                    & Q(users__notifications__target_content_type=ContentType.objects.get_for_model(self.model))
                    & Q(users__notifications__target_object_id=Cast(F('id'), CharField())),
                    then=Value(1)),
                output_field=IntegerField()),
            distinct=True))
        return room
    return self