def get_queryset(self):
    """Return the requesting user's entries with flattened ``type`` metadata.

    The JSON ``type`` field is flattened into plain-text annotations
    (``type_source``, ``type_de``, ``type_en``) so serializers can read them
    directly. For ``list`` actions, an optional ``q`` query param switches to
    the search queryset, and ``link_selection_for`` restricts results to
    entries that can still be linked to the given entry.

    Returns:
        QuerySet: the filtered/annotated entries, or an empty queryset when
        ``link_selection_for`` references a non-existent/foreign entry.
    """
    user = self.request.user
    # Shared in both branches — keep the annotation spec in one place.
    type_annotations = dict(
        type_source=KeyTextTransform('source', 'type'),
        type_de=KeyTextTransform('de', KeyTextTransform('label', 'type')),
        type_en=KeyTextTransform('en', KeyTextTransform('label', 'type')),
    )
    qs = (
        Entry.objects.filter(owner=user)
        .annotate(**type_annotations)
        .order_by('-date_changed')
    )
    if self.action == 'list':
        q = self.request.query_params.get('q')
        if q:
            # Search results keep their own (relevance) ordering, so the
            # default -date_changed ordering is intentionally not applied.
            qs = (
                Entry.objects.search(q)
                .filter(owner=user)
                .annotate(**type_annotations)
            )
        entry_pk = self.request.query_params.get('link_selection_for')
        if entry_pk:
            try:
                entry = Entry.objects.get(pk=entry_pk, owner=user)
                # Exclude the entry itself and anything already linked to it.
                qs = qs.exclude(pk=entry_pk).exclude(
                    to_entries__from_entry=entry)
            except Entry.DoesNotExist:
                return Entry.objects.none()
    return qs
def getBlockLevel(reports):
    """Summarise block-level report delivery statistics.

    For each distinct block appearing in block-type reports, count how many
    reports were sent, visited, read (at least one visit) and downloaded.

    Args:
        reports: report queryset exposing ``report_type``, a JSON
            ``parameters`` field and a ``tracking`` relation.

    Returns:
        list[dict]: one dict per block with keys
        sent/read/visit/download/block/district.
    """
    blockreport = reports.filter(
        Q(report_type='BlockReport') | Q(report_type='BlockReportSummarized')
    ).annotate(
        district_name=KeyTextTransform('district_name', 'parameters'),
        block_name=KeyTextTransform('block_name', 'parameters'),
    )
    blocks = blockreport.values_list('block_name', flat=True).distinct()
    count = []
    for block in blocks:
        block_reports = blockreport.filter(block_name=block)
        district_name = block_reports.values_list(
            'district_name', flat=True).distinct()[0]
        # One aggregate() round-trip instead of three separate queries; all
        # three aggregates traverse the same 'tracking' join, so the merged
        # query produces identical results.
        stats = block_reports.aggregate(
            visit=Sum('tracking__visit_count'),
            read_unique=Count(
                Case(When(tracking__visit_count__gt=0, then=1))),
            download=Sum('tracking__download_count'),
        )
        count.append(dict(
            sent=block_reports.count(),
            read=stats['read_unique'],
            visit=stats['visit'],
            download=stats['download'],
            block=block,
            district=district_name,
        ))
    return count
def calculate_score(self):
    """Calculate score.

    Annotates the queryset with:
        score_content / score_channels: summed float ``score`` values
            extracted from the related JSON ``src`` fields (0 when absent).
        total_content / total_channels: related-object counts.
        sum_content: score_content + score_channels.
        sum_tota: total_content + total_channels.
            NOTE(review): name looks like a typo for ``sum_total``; kept
            as-is because downstream code may reference the annotation name.
        average: sum_content / sum_tota.
            NOTE(review): when both counts are 0 this divides by zero at
            the database level — confirm callers guard against empty
            relations.

    Return:
        Queryset: Queryset
    """
    return self.annotate(
        score_content=Coalesce(
            Sum(
                Cast(KeyTextTransform("score", "content__src"),
                     models.FloatField())), 0),
        total_content=Count('content'),
        score_channels=Coalesce(
            Sum(
                Cast(
                    KeyTextTransform("score", "tree_channels__content__src"),
                    models.FloatField())), 0),
        total_channels=Count('tree_channels__content'),
    ).annotate(
        sum_content=models.ExpressionWrapper(
            F('score_content') + F('score_channels'),  # NOQA
            output_field=models.FloatField()),
        sum_tota=models.ExpressionWrapper(
            F('total_content') + F('total_channels'),
            output_field=models.FloatField()),
    ).annotate(average=models.ExpressionWrapper(
        F('sum_content') / F('sum_tota'),  # NOQA
        output_field=models.FloatField()))
def apply_filters(self, request, applicable_filters):
    """Apply resource filters, requiring at least one identifying filter.

    Behaves like the parent implementation, plus:
      * requires one of contest_id / account_id / coder_id, raising
        BadRequest otherwise;
      * translates the pseudo-filters ``rating_change__isnull``,
        ``new_rating__isnull`` and ``coder_id`` into lookups on the JSON
        ``addition`` field / the account's coders relation;
      * annotates integer-cast rating columns extracted from ``addition``.

    Raises:
        BadRequest: when none of the required filters is present.
    """
    one_of = ['contest_id__exact', 'account_id__exact', 'coder_id']
    if not any(applicable_filters.get(k) for k in one_of):
        raise BadRequest(
            f'One of {[k.split("__")[0] for k in one_of]} is required')

    def as_bool(values):
        # Filter values arrive as a list of strings; only the first is used.
        return values[0] in ['true', '1', 'yes']

    rating_change_isnull = applicable_filters.pop('rating_change__isnull', None)
    new_rating_isnull = applicable_filters.pop('new_rating__isnull', None)
    coder_id = applicable_filters.pop('coder_id', None)

    qs = super().apply_filters(request, applicable_filters)
    qs = qs.select_related('account', 'contest')

    if rating_change_isnull:
        qs = qs.filter(
            addition__rating_change__isnull=as_bool(rating_change_isnull))
    if new_rating_isnull:
        qs = qs.filter(
            addition__new_rating__isnull=as_bool(new_rating_isnull))
    if coder_id:
        qs = qs.filter(account__coders=coder_id[0])

    # Expose the JSON rating values as integer columns for sorting/filtering.
    qs = qs \
        .annotate(new_rating=Cast(KeyTextTransform('new_rating', 'addition'), IntegerField())) \
        .annotate(old_rating=Cast(KeyTextTransform('old_rating', 'addition'), IntegerField())) \
        .annotate(rating_change=Cast(KeyTextTransform('rating_change', 'addition'), IntegerField()))
    return qs
def _get_companies_queryset(self):
    """Return non-archived companies without a duns_number that have exactly
    one unambiguous D&B match whose duns_number is not already in use.

    Matching values live in JSON: ``dnbmatchingresult.data`` holds both a
    ``dnb_match`` object (with ``duns_number``) and a ``wb_record`` object
    (with ``DUNS Number``); both are re-cast to jsonb before key extraction
    so the text lookup works on the nested object.
    """
    # subquery used to only get the matches without duplicates
    subquery_for_matched_duns_numbers = DnBMatchingResult.objects.filter(
        company__archived=False,
    ).annotate(
        # data['dnb_match'] as jsonb, then its 'duns_number' key as text
        dnb_match=KeyTransform('dnb_match', 'data'),
        matched_duns_number=KeyTextTransform(
            'duns_number',
            Cast('dnb_match', JSONField()),
        ),
    ).values('matched_duns_number', ).annotate(
        group_count=Count('matched_duns_number'),
    ).filter(
        matched_duns_number__isnull=False,
        group_count=1,  # keep only duns numbers matched exactly once
    ).values('matched_duns_number')

    # subquery used to exclude all the duns_numbers already being used
    # (there's a unique constraint on the Company.duns_number field)
    subquery_for_existing_duns_numbers = Company.objects.filter(
        duns_number__isnull=False,
    ).values('duns_number')

    return Company.objects.annotate(
        # worldbase record's 'DUNS Number', extracted as text
        wb_record=KeyTransform('wb_record', 'dnbmatchingresult__data'),
        matched_duns_number=KeyTextTransform(
            'DUNS Number',
            Cast('wb_record', JSONField()),
        ),
    ).filter(
        duns_number__isnull=True,
        archived=False,
        matched_duns_number__in=subquery_for_matched_duns_numbers,
    ).exclude(matched_duns_number__in=subquery_for_existing_duns_numbers, )
def getClusterLevel(reports):
    """Summarise cluster-level report delivery statistics.

    For each distinct cluster appearing in cluster-type reports, count how
    many reports were sent, visited, read (at least one visit) and
    downloaded.

    Args:
        reports: report queryset exposing ``report_type``, a JSON
            ``parameters`` field and a ``tracking`` relation.

    Returns:
        list[dict]: one dict per cluster with keys
        sent/read/visit/download/block/cluster.
    """
    clusterreport = reports.filter(
        Q(report_type='ClusterReport') |
        Q(report_type='ClusterReportSummarized')
    ).annotate(
        cluster_name=KeyTextTransform('cluster_name', 'parameters'),
        block_name=KeyTextTransform('block_name', 'parameters'),
    )
    clusters = clusterreport.values_list('cluster_name', flat=True).distinct()
    count = []
    for cluster in clusters:
        c = clusterreport.filter(cluster_name=cluster)
        block_name = c.values_list('block_name', flat=True).distinct()[0]
        # One aggregate() round-trip instead of three separate queries; all
        # three aggregates traverse the same 'tracking' join, so the merged
        # query produces identical results.
        stats = c.aggregate(
            visit=Sum('tracking__visit_count'),
            read_unique=Count(
                Case(When(tracking__visit_count__gt=0, then=1))),
            download=Sum('tracking__download_count'),
        )
        count.append(dict(
            sent=c.count(),
            read=stats['read_unique'],
            visit=stats['visit'],
            download=stats['download'],
            block=block_name,
            cluster=cluster,
        ))
    return count
def user_profile(request, user_id):
    """Render a user's public profile with game statistics and PBEM speed.

    Computes: the user's display group/color, their visible games (faceless
    games hidden), win counts/rate excluding the 'learn-the-game' variant,
    and a trimmed average of their last 100 PBEM response times.
    """
    user = get_object_or_404(User, id=user_id)

    group_name = None
    group_color = None
    for possible_group_name, possible_group_color in GROUP_COLORS.items():
        if user.is_in_group(possible_group_name):
            group_name = possible_group_name
            group_color = possible_group_color
            break

    games_of_user = PlayerInGame.objects.prefetch_related('game').annotate(
        players_count=Count('game__players'),
        game_variant=KeyTextTransform(
            'setupId', KeyTextTransform('settings', 'game__view_of_game')),
        has_won=Cast(KeyTextTransform('is_winner', 'data'), BooleanField()),
        is_faceless=Cast(
            KeyTextTransform(
                'faceless', KeyTextTransform('settings', 'game__view_of_game')),
            BooleanField())
        # We can simply filter for is_faceless=False|None and omit the check
        # for ongoing games only as we reset the property after game ended.
    ).filter(
        Q(user=user) & (Q(is_faceless=None) | Q(is_faceless=False))
    ).order_by('-game__created_at')

    user.games_of_user = games_of_user.filter(
        Q(game__state=IN_LOBBY) | Q(game__state=ONGOING) | Q(game__state=FINISHED))
    user.cancelled_games = games_of_user.filter(game__state=CANCELLED)
    user.ongoing_count = games_of_user.filter(game__state=ONGOING).count()
    user.won_count = games_of_user.filter(
        Q(has_won=True) & ~Q(game_variant='learn-the-game')).count()
    user.finished_count = games_of_user.exclude(
        data__is_winner__isnull=True).filter(
        (Q(game__state=FINISHED) & ~Q(game_variant='learn-the-game'))).count()

    if user.finished_count > 0:
        user.win_rate = "{:.1f} %".format(
            user.won_count / user.finished_count * 100)
    else:
        user.win_rate = "n/a"

    # Average of the last 100 PBEM response times. We want to exclude the 10
    # biggest and 10 smallest values, so the trimming has to happen in Python
    # rather than in a DB aggregate.
    elements = PbemResponseTime.objects.filter(
        user=user).order_by('-created_at')[:100]
    if elements:
        values = [element.response_time for element in elements]
        if len(values) > 20:
            values = sorted(values)
            del values[:10]
            del values[-10:]
        avg = round(sum(values) / len(values))
        user.average_pbem_speed = str(timedelta(seconds=avg))
    else:
        user.average_pbem_speed = "n/a"

    return render(request, "agotboardgame_main/user_profile.html", {
        "viewed_user": user,
        "group_name": group_name,
        "group_color": group_color,
        "banned_or_on_probation":
            request.user.is_authenticated
            and request.user.is_in_one_group(["On probation", "Banned"])
    })
def get_report(cls, start_date, end_date, tables=None):
    """Build an orders report for a date range.

    Args:
        cls: model class providing ``get_orders_from_date_range``.
        start_date: range start.
        end_date: range end.
        tables: optional list of tables to restrict to; defaults to all.

    Returns:
        dict with 'orders' (count), 'customers' (male + female totals pulled
        from the JSON ``details.customers`` object) and 'sales' (item count
        and total earnings).
    """
    # None default avoids the shared-mutable-default pitfall; the callee
    # still receives a list, preserving the original behavior.
    if tables is None:
        tables = []
    orders = cls.get_orders_from_date_range(start_date, end_date, tables)
    customers = orders.annotate(
        male=Cast(
            KeyTextTransform('male', KeyTextTransform('customers', 'details')),
            IntegerField()),
        female=Cast(
            KeyTextTransform('female', KeyTextTransform('customers', 'details')),
            IntegerField())).values('male', 'female').aggregate(
                total_male=Sum('male'), total_female=Sum('female'))
    # Sum() returns None when there are no rows; normalise to 0.
    if customers['total_male'] is None:
        customers['total_male'] = 0
    if customers['total_female'] is None:
        customers['total_female'] = 0
    OrderItem = apps.get_model('orders', 'OrderItem')
    items = OrderItem.objects.filter(order__in=orders).exclude(
        state=ORDER_STATE.canceled)
    # Generator expressions — no need to materialise intermediate lists.
    item_sales = sum(item['amount'] for item in items.values('amount'))
    total_earning = int(
        sum(order['income'] for order in orders.values('income')))
    if len(items) > 0:
        sales = {
            'items': item_sales,
            'earning': total_earning,
        }
    else:
        sales = {'items': 0, 'earning': 0}
    return {
        'orders': {
            'count': len(orders)
        },
        'customers': {
            'count': customers.get('total_male', 0) +
                     customers.get('total_female', 0)
        },
        'sales': sales,
    }
def extract_counts(indicator, qs):
    """
    Data extraction for indicator data object.

    Create Indicator data object for indicator object and all geography.
    This task populates the Json field in indicator data object.

    Data json field should be populated with json of group name in groups of
    indicator and total count of that group according to geography.

    So for Gender group object should look like:
        {"Gender": "Male", "count": 123, ...}
    """
    if indicator.universe is not None:
        qs = qs.filter_by_universe(indicator.universe)
    groups = ["data__" + i for i in indicator.groups]
    # (removed an unused Cast(...) expression; grouped_totals_by_geography
    # builds its own count expression)
    qs = qs.exclude(data__count="")
    qs = qs.order_by("geography_id")
    data = groupby(qs.grouped_totals_by_geography(groups),
                   lambda x: x["geography_id"])
    datarows = []
    for geography_id, group in data:
        # Strip the "data__" prefix from every key via a JSON round-trip.
        data_dump = json.dumps(list(group))
        grouped = json.loads(data_dump.replace("data__", ""))
        for item in grouped:
            item.pop("geography_id")
        datarows.append({"geography_id": geography_id, "data": grouped})
    return datarows
def get_dashboard_tabular_usage_counts(self, start_date, end_date, domain):
    """
    :param start_date: start date of the filter
    :param end_date: end date of the filter
    :param domain: project domain used to build the audited export URL
    :return: returns the counts of no of downloads of each and total reports
        for all usernames, as (per-user totals, per-user per-indicator lists)
    """
    # Plain string — the previous f-string had no placeholders.
    print('Compiling tabular usage counts for users')
    tabular_user_counts = defaultdict(int)
    tabular_user_indicators = defaultdict(lambda: [0] * 10)
    records = list(
        ICDSAuditEntryRecord.objects
        .filter(url=f'/a/{domain}/icds_export_indicator',
                time_of_use__gte=start_date,
                time_of_use__lt=end_date)
        .annotate(indicator=Cast(
            KeyTextTransform('indicator', 'post_data'), TextField()))
        .filter(indicator__lte=THR_REPORT_EXPORT)
        .values('indicator', 'username')
        .annotate(count=Count('indicator'))
        .order_by('username', 'indicator'))
    for record in records:
        if record['indicator'] == '':
            continue
        # Hoist the repeated username normalisation out of the two updates.
        username = record['username'].split('@')[0]
        tabular_user_counts[username] += record['count']
        # Indicators are 1-based; slot them into a 0-based list.
        tabular_user_indicators[username][int(record['indicator']) - 1] = \
            record['count']
    return tabular_user_counts, tabular_user_indicators
def test_lookups_with_key_transform(self):
    """Each JSON lookup combined with a key transform should match a row."""
    cases = (
        ('field__d__contains', 'e'),
        ('field__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}),
        ('field__baz__has_key', 'c'),
        ('field__baz__has_keys', ['a', 'c']),
        ('field__baz__has_any_keys', ['a', 'x']),
        ('field__contains', KeyTransform('bax', 'field')),
        (
            'field__contained_by',
            KeyTransform(
                'x',
                RawSQL('%s::jsonb', ['{"x": {"a": "b", "c": 1, "d": "e"}}'])),
        ),
        ('field__has_key', KeyTextTransform('foo', 'field')),
    )
    for lookup, value in cases:
        with self.subTest(lookup=lookup):
            matching = JSONModel.objects.filter(**{lookup: value})
            self.assertTrue(matching.exists())
def lookups(self, request, model_admin):
    """
    Returns a list of tuples. The first element in each tuple is the coded
    value for the option that will appear in the URL query. The second
    element is the human-readable name for the option that will appear in
    the right sidebar.

    Options are the distinct JSON ``type`` values found in the queryset's
    ``data`` field.
    """
    from django.contrib.postgres.fields.jsonb import KeyTextTransform

    qs = model_admin.get_queryset(request).annotate(
        types=KeyTextTransform('type', 'data')).values_list(
            'types', flat=True)
    # set(qs) deduplicates directly; the previous listcomp was redundant.
    # Removed: an unused `Counter` import and a hard-coded second return
    # (('event', ...), ...) that was unreachable dead code.
    return ((x, _('%s' % x)) for x in set(qs))
def __init__(
        self, column, zh, en=None, field=None, enum=None, annotate=None,
        fn=None, child_fields=None):
    """Describe an exportable column.

    Args:
        column: source column / JSON field name.
        zh: Chinese display name.
        en: English name; defaults to ``column`` (suffixed with ``field``
            when a JSON sub-field is given).
        field: optional JSON key to extract from ``column``.
        enum: accepted but not stored — NOTE(review): confirm whether any
            caller relies on it before removing the parameter.
        annotate: explicit annotation expression; when omitted and ``field``
            is set, a KeyTextTransform is built automatically.
        fn: optional post-processing callable.
        child_fields: optional list of kwargs dicts for nested Field specs.
    """
    self.column = column
    self.zh = zh
    self.en = en if en else column
    self.field = field
    self.annotate = annotate
    self.fn = fn
    self.child_fields = []
    if field:
        self.en = '{}_{}'.format(self.en, field)
    if not annotate and self.field:
        self.annotate = KeyTextTransform(self.field, self.column)
    # None default instead of a mutable [] default (shared across calls).
    for child in (child_fields or []):
        self.child_fields.append(Field(**child))
def get_queryset(self):
    """Apply the request's search params and attach per-document counts."""
    qs = super().get_queryset()

    # Simple icontains text searches, applied only when supplied.
    text_filters = (
        ("description_search", "description__icontains"),
        ("name_search", "name__icontains"),
        ("language_search", "language__icontains"),
    )
    for param, lookup in text_filters:
        term = self.request.GET.get(param)
        if term:
            qs = qs.filter(**{lookup: term})

    if 'party_pk' in self.request.GET:
        qs = qs.filter(
            textunit__partyusage__party__pk=self.request.GET['party_pk'])

    # Populate with child counts
    qs = qs.annotate(
        is_contract=KeyTextTransform('is_contract', 'metadata'),
        properties=Count('documentproperty', distinct=True),
        text_units=F('paragraphs') + F('sentences'),
        num_relation_a=Count('document_a_set', distinct=True),
        num_relation_b=Count('document_b_set', distinct=True),
    ).annotate(relations=F('num_relation_a') + F('num_relation_b'))
    return qs
def get_cars_count(self):
    """Annotate each Color with the number of cars whose JSON
    ``parameters.colorId`` equals that color's pk."""
    color_id_expr = Cast(
        KeyTextTransform('colorId', 'parameters'), models.IntegerField())
    per_color_totals = (
        Car.objects
        .annotate(color_id=color_id_expr)
        .filter(color_id=OuterRef('pk'))
        .order_by()
        .values('color_id')
        .annotate(total=Count('*'))
        .values('total')
    )
    return Color.objects.annotate(cars_count=Subquery(per_color_totals))
def create_pickup_upcoming_notifications():
    """Create PICKUP_UPCOMING notifications for collectors of pickups due
    soon who have not been notified yet.

    notification.context is a JSONField, so the pickup_collector id would
    come back as jsonb by default (not comparable to integer). Work around
    that by extracting the value as text ("->>" lookup) and casting to
    integer before using it in the subquery.
    """
    already_notified_ids = (
        Notification.objects
        .order_by()
        .filter(type=NotificationType.PICKUP_UPCOMING.value)
        .exclude(context__pickup_collector=None)
        .values_list(
            Cast(KeyTextTransform('pickup_collector', 'context'),
                 IntegerField()),
            flat=True)
    )
    due_soon = PickupDate.objects.order_by().due_soon()
    pending_collectors = (
        PickupDateCollector.objects
        .filter(pickupdate__in=due_soon)
        .exclude(id__in=already_notified_ids)
        .distinct()
    )
    for collector in pending_collectors:
        Notification.objects.create(
            type=NotificationType.PICKUP_UPCOMING.value,
            user=collector.user,
            expires_at=collector.pickupdate.date,
            context={
                'group': collector.pickupdate.store.group.id,
                'store': collector.pickupdate.store.id,
                'pickup': collector.pickupdate.id,
                'pickup_collector': collector.id,
            },
        )
def create_activity_upcoming_notifications():
    """Create ACTIVITY_UPCOMING notifications for participants of activities
    due soon who have not been notified yet, recording the elapsed time as a
    periodic-task stat.

    notification.context is a JSONField, so the activity_participant id
    would come back as jsonb by default (not comparable to integer). Work
    around that by extracting the value as text ("->>" lookup) and casting
    to integer before using it in the subquery.
    """
    with timer() as t:
        already_notified_ids = (
            Notification.objects
            .order_by()
            .filter(type=NotificationType.ACTIVITY_UPCOMING.value)
            .exclude(context__activity_participant=None)
            .values_list(
                Cast(KeyTextTransform('activity_participant', 'context'),
                     IntegerField()),
                flat=True)
        )
        due_soon = Activity.objects.order_by().due_soon()
        pending_participants = (
            ActivityParticipant.objects
            .filter(activity__in=due_soon)
            .exclude(id__in=already_notified_ids)
            .distinct()
        )
        for participant in pending_participants:
            Notification.objects.create(
                type=NotificationType.ACTIVITY_UPCOMING.value,
                user=participant.user,
                expires_at=participant.activity.date.start,
                context={
                    'group': participant.activity.place.group.id,
                    'place': participant.activity.place.id,
                    'activity': participant.activity.id,
                    'activity_participant': participant.id,
                },
            )
    stats_utils.periodic_task(
        'notifications__create_activity_upcoming_notifications',
        seconds=t.elapsed_seconds)
def events_query(
    self,
    team: Team,
    filter: SessionsFilter,
    date_filter: Q,
    distinct_ids: List[str],
    start_timestamp: Optional[str],
) -> QuerySet:
    """Build the events queryset for session listing: newest first, with
    $current_url extracted from the JSON properties and one boolean column
    annotated per configured action filter."""
    qs = (
        Event.objects.filter(team=team)
        .filter(date_filter)
        .filter(distinct_id__in=distinct_ids)
        .order_by("-timestamp")
        .only("distinct_id", "timestamp")
        .annotate(current_url=KeyTextTransform("$current_url", "properties"))
    )
    if start_timestamp is not None:
        qs = qs.filter(
            timestamp__lt=datetime.fromtimestamp(float(start_timestamp)))

    entity_keys: List[str] = []
    for index, entity in enumerate(filter.action_filters):
        key = f"entity_{index}"
        matches_entity = ExpressionWrapper(
            entity_to_Q(entity, team.pk), output_field=BooleanField())
        qs = qs.annotate(**{key: matches_entity})
        entity_keys.append(key)

    return qs.values_list(
        "distinct_id", "timestamp", "current_url", *entity_keys)
def value(self):
    """Aggregate count/avg/sum over the numeric ``value`` key stored in the
    JSON ``form_data`` field."""
    numeric_value = Cast(
        KeyTextTransform('value', 'form_data'), output_field=FloatField())
    return self.annotate(value=numeric_value).aggregate(
        Count('value'),
        Avg('value'),
        Sum('value'),
    )
def apply_ordering(queryset, ordering, only_undefined_field=False):
    """Order ``queryset`` by the first entry of ``ordering``.

    Field names starting with ``data__`` refer to keys inside the JSON
    ``data`` field; they are annotated via KeyTextTransform so float/int/
    bool ordering works. Nulls always sort last. Without any ordering, the
    queryset is ordered by id.

    Args:
        queryset: queryset to order.
        ordering: list whose first element is the (possibly prefixed)
            field name; empty/None means default id ordering.
        only_undefined_field: forwarded to the configured field-name
            preprocessor.
    """
    if ordering:
        preprocess_field_name = load_func(settings.PREPROCESS_FIELD_NAME)
        field_name, ascending = preprocess_field_name(
            ordering[0], only_undefined_field=only_undefined_field)
        if field_name.startswith('data__'):
            # annotate task with data field for float/int/bool ordering
            # support. Strip only the leading prefix — str.replace would
            # also mangle a JSON key that itself contains "data__".
            json_field = field_name[len('data__'):]
            queryset = queryset.annotate(
                ordering_field=KeyTextTransform(json_field, 'data'))
            f = F('ordering_field').asc(
                nulls_last=True) if ascending else F('ordering_field').desc(
                    nulls_last=True)
        else:
            f = F(field_name).asc(
                nulls_last=True) if ascending else F(field_name).desc(
                    nulls_last=True)
        queryset = queryset.order_by(f)
    else:
        queryset = queryset.order_by("id")
    return queryset
def get_queryset(self, indicator, geographies, universe=None):
    """Collect per-group "Count" totals for this node's child geographies.

    NOTE: ``geographies`` is accepted for interface compatibility; the data
    is filtered by ``self.child_geographies``, and each result row is
    tagged with ``self.geography.pk``.
    """
    group_fields = ["data__" + g for g in indicator.groups]
    count_expr = Cast(KeyTextTransform("Count", "data"),
                      models.IntegerField())

    qs = (
        DatasetData.objects
        .filter(dataset=indicator.dataset)
        .filter(geography__in=self.child_geographies)
    )
    if universe is not None:
        universe_filters = {
            f"data__{k}": v for k, v in universe.filters.items()}
        qs = qs.filter(**universe_filters)

    if group_fields:
        rows = qs.values(*group_fields).annotate(count=Sum(count_expr))
    else:
        # No grouping: collapse to a single aggregate row.
        rows = [qs.aggregate(count=Sum(count_expr))]

    results = []
    for row in rows:
        row.update(geography=self.geography.pk)
        results.append(row)
    return results
def _list_filter(self, query_params):
    """Annotate flattened JSON metadata columns and filter by query params.

    Exposed columns (all extracted from the JSON ``metadata`` field):
    startutc (event start date), eventname, price (minimum ticket price as
    float) and organization. The remaining query params are applied
    verbatim as filter kwargs.
    """
    filters = query_params.dict()
    annotated = (
        self
        .annotate(datastart=KeyTransform('start', 'metadata'))
        .annotate(startutc=Cast(
            KeyTextTransform('utc', 'datastart'), models.DateField()))
        .annotate(dataname=KeyTransform('name', 'metadata'))
        .annotate(eventname=KeyTextTransform('text', 'dataname'))
        .annotate(dataticket=KeyTransform('ticket_availability', 'metadata'))
        .annotate(ticketmin=KeyTransform(
            'minimum_ticket_price', 'dataticket'))
        .annotate(price=Cast(
            KeyTextTransform('value', 'ticketmin'), models.FloatField()))
        .annotate(dataorg=KeyTransform('organizer', 'metadata'))
        .annotate(organization=KeyTextTransform('name', 'dataorg'))
    )
    return annotated.filter(**filters)
def get_queryset(self, request):
    """Restrict the admin changelist to comprobantes needing attention:
    missing AFIP data, a CAE mismatch between the model field and the JSON
    ``data``, or records already flagged as fixed (data_arreglada)."""
    base = super(ComprobanteAdmin, self).get_queryset(request)
    # KeyTextTransform extracts the 'cae' key from the JSON 'data' field;
    # Cast turns the extracted value into a CharField for comparison.
    cae_in_data = Cast(
        KeyTextTransform('cae', 'data'), output_field=CharField())
    return base.filter(
        Q(data__isnull=True)
        | ~Q(cae=cae_in_data)
        | Q(data_arreglada=True))
def grouped_totals_by_geography(self, groups):
    """
    Sum the JSON ``count`` value per geography for the given grouping.

    ``groups`` needs to be a list of fields to group by, each prefixed with
    ``data__`` — for example ['data__sex', 'data__age'].
    """
    count_as_float = Cast(KeyTextTransform("count", "data"), FloatField())
    grouped = self.values(*groups, "geography_id")
    return grouped.annotate(count=Sum(count_as_float))
def _localized_lookup(self, language, bare_lookup):
    """Translate a lookup on this field into its language-specific form.

    The default language maps straight onto the original column; any other
    language is resolved inside the ``i18n`` JSON blob under the localized
    field name.
    """
    if language == DEFAULT_LANGUAGE:
        return bare_lookup.replace(self.name, self.original_name)
    localized_name = build_localized_fieldname(self.original_name, language)
    return KeyTextTransform(
        localized_name, bare_lookup.replace(self.name, "i18n"))
def counts_and_sums(self, date_from, date_to):
    """Per-day datapoint counts plus observed/expected irradiation and
    energy totals (floats extracted from the JSON ``observed`` and
    ``expected`` fields) for the given datetime range.

    Returns:
        A values() queryset with one row per day: day, count,
        observed_irradiation_total, observed_energy_total,
        expected_irradiation_total, expected_energy_total.
    """
    # NOTE(review): the range filter is applied after values()/annotate();
    # clause order is kept as-is because reordering can change the SQL
    # Django generates for grouped queries.
    return (DataPoint.objects.annotate(
        day=TruncDay('datetime')
    ).values('day').annotate(count=Count('datetime')).annotate(
        observed_irradiation_total=Sum(
            Cast(KeyTextTransform('irradiation', 'observed'), FloatField())
        )).annotate(observed_energy_total=Sum(
            Cast(KeyTextTransform("energy", 'observed'), FloatField())
        )).annotate(expected_irradiation_total=Sum(
            Cast(KeyTextTransform('irradiation', 'expected'),
                 FloatField()))).annotate(expected_energy_total=Sum(
                     Cast(KeyTextTransform('energy', 'expected'),
                          FloatField()))).values(
                              'day', 'count', 'observed_irradiation_total',
                              'observed_energy_total',
                              'expected_irradiation_total',
                              'expected_energy_total').filter(
                                  Q(datetime__range=[date_from, date_to])))
def handle(self, *args, **options):
    """Print the number of cars per colorId found in the JSON parameters."""
    per_color = (
        Car.objects
        .annotate(color_id=KeyTextTransform('colorId', 'parameters'))
        .values('color_id')
        .annotate(Count('id'))
        .order_by()
    )
    for row in per_color:
        self.stdout.write(self.style.SUCCESS(repr(row)))
def get_queryset(self):
    """Return the current client's campaigns, each decorated with
    ``total_likes`` / ``total_comments`` summed from its posted posts'
    JSON ``analysis`` field.

    NOTE(review): this issues one aggregate query per campaign (N+1);
    consider a Subquery annotation on the campaign queryset if the
    campaign list grows large.
    """
    queryset = Campaign.objects.filter(client=self.request.user)
    for qqq in queryset:
        # Cast the JSON 'likes'/'comments' values to integers, then sum
        # them over all posted posts of this campaign.
        posts = Post.objects.filter(campaign__id=qqq.id, is_posted=True).annotate(aa=Cast(KeyTextTransform('likes', 'analysis'), IntegerField()), bb=Cast(KeyTextTransform('comments', 'analysis'), IntegerField())) \
            .aggregate(total_likes=Sum('aa'), total_comments=Sum('bb'))
        # Attach the totals to the cached campaign instances; the queryset's
        # result cache keeps these attributes alive for the caller.
        qqq.total_likes = posts['total_likes']
        qqq.total_comments = posts['total_comments']
    return queryset
def order_by_reminder(queryset, time):
    """Filter and sort items by their JSON ``reminder`` timestamp.

    Args:
        queryset: queryset with a ``status`` field and a JSON ``parameters``
            field containing an ISO-8601 ``reminder`` string.
        time: 'expired' keeps reminders before now, 'upcoming' after now;
            any other value applies no time filter.

    Returns:
        The queryset (CANCELED excluded) ordered by reminder; lexicographic
        comparison of the ISO-8601 text matches chronological order.
    """
    queryset = queryset.exclude(status='CANCELED')
    queryset = queryset.annotate(
        reminder=KeyTextTransform('reminder', 'parameters'))
    # Format once instead of duplicating the expression in both branches.
    # Naive local time is kept to match the stored values —
    # NOTE(review): confirm the stored timestamps are local, not UTC.
    now_str = datetime.datetime.strftime(
        datetime.datetime.now(), '%Y-%m-%dT%H:%M:%S')
    if time == 'expired':
        queryset = queryset.filter(reminder__lt=now_str)
    elif time == 'upcoming':
        queryset = queryset.filter(reminder__gt=now_str)
    queryset = queryset.order_by('reminder')
    return queryset
def chemical_property_avg(self, property_key):
    """
    Returns a queryset's average value for a chemical property stored in
    the JSONField

    Args:
        property_key (str): the property identifier, one of xlogp, hac,
            rbc, hetac or mw

    Returns:
        A dict whose 'as_float__avg' key holds the average as a float
        (Django's default aggregate key for Avg('as_float'))

    Example:
        >>> Compound.objects.aromatics().chemical_property_avg('mw').get('as_float__avg')
        190.38532110091742
    """
    return self.annotate(
        val=KeyTextTransform(property_key, 'chemical_properties')).annotate(
            as_float=Cast('val', FloatField())).aggregate(Avg('as_float'))