def test_expressions(self):
    """Check the repr() of each expression class against its expected form."""
    cases = [
        (Case(When(a=1)),
         "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), "
         "ELSE Value(None)>"),
        (Col('alias', 'field'), "Col(alias, field)"),
        (Date('published', 'exact'), "Date(published, exact)"),
        (DateTime('published', 'exact', utc),
         "DateTime(published, exact, %s)" % utc),
        (F('published'), "F(published)"),
        (F('cost') + F('tax'), "<CombinedExpression: F(cost) + F(tax)>"),
        (Func('published', function='TO_CHAR'),
         "Func(F(published), function=TO_CHAR)"),
        (OrderBy(Value(1)), 'OrderBy(Value(1), descending=False)'),
        (Random(), "Random()"),
        (RawSQL('table.col', []), "RawSQL(table.col, [])"),
        (Ref('sum_cost', Sum('cost')), "Ref(sum_cost, Sum(F(cost)))"),
        (Value(1), "Value(1)"),
    ]
    for expression, expected in cases:
        self.assertEqual(repr(expression), expected)
def get_noncovid_locations_nearby_coords(latitude, longitude, max_distance=None):
    """
    Return non-COVID-exclusive hospitals with free beds, ordered by their
    great-circle distance (in kilometers) to the given coordinates.

    When ``max_distance`` is given, only hospitals closer than that many
    kilometers are returned.
    """
    # Great circle distance formula (6371 = Earth radius in km); the
    # least/greatest pair clamps the cosine into acos()'s [-1, 1] domain.
    gcd_formula = "6371 * acos(least(greatest(\
cos(radians(%s)) * cos(radians(latitude)) \
* cos(radians(longitude) - radians(%s)) + \
sin(radians(%s)) * sin(radians(latitude)) \
, -1), 1))"
    distance_expr = RawSQL(gcd_formula, (latitude, longitude, latitude))
    nearby = (
        Hospital.objects.all()
        .annotate(distance=distance_expr)
        .order_by('distance')
        .filter(covid_exclusive=False)
        .filter(empty_beds__gt=0)
    )
    if max_distance is not None:
        nearby = nearby.filter(distance__lt=max_distance)
    return nearby
def filter_daily_total(queryset, context, qualif=False):
    # Narrow ``queryset`` with the year/month/stage/state filters found in
    # ``context`` and, when ``qualif`` is falsy, aggregate total area per
    # image date.
    #
    # NOTE(review): when ``qualif`` is truthy the function falls through
    # and implicitly returns None — confirm whether a return statement for
    # that branch was lost (this chunk may be truncated).
    if 'ano' in context and context['ano']:
        # Filter by year.
        queryset = queryset.filter(ano=int(context['ano']))
    if 'mes' in context and context['mes'] and not qualif:
        # Non-qualif data: month is stored in the form produced by
        # get_month() — presumably a name/label; verify against the model.
        queryset = queryset.filter(mes=get_month(int(context['mes'])))
    elif 'mes' in context and context['mes'] and qualif:
        # Qualif data keys the month by its number directly.
        queryset = queryset.filter(mes=int(context['mes']))
    if 'estagio' in context and context['estagio'] and context[
            'tipo'] == 'AWIFS':
        # The "estagio" filter only applies to the AWIFS product type.
        queryset = queryset.filter(estagio=context['estagio'])
    if 'uf' in context and context['uf'] and not qualif:
        queryset = queryset.filter(estado=context['uf'])
    if not qualif:
        # Sum area per image date and expose the day-of-month for ordering.
        return queryset.values('data_imagem')\
            .annotate(
                total=Sum('area_km2'),
            ).annotate(
                dia=RawSQL("EXTRACT(day FROM data_imagem)", ())
            ).order_by('dia')
def query_groups(self) -> List[List[bool]]:
    """
    Evaluate every feature-flag group against the person behind
    ``self.distinct_id``; each row holds one boolean per group.
    """
    persons: QuerySet = Person.objects.filter(
        team_id=self.feature_flag.team_id,
        persondistinctid__distinct_id=self.distinct_id,
        persondistinctid__team_id=self.feature_flag.team_id,
    )
    group_aliases = []
    for idx, group in enumerate(self.feature_flag.groups):
        alias = f"group_{idx}"
        if len(group.get("properties", {})) > 0:
            # Property-restricted group: translate its filters into a Q.
            condition: Any = properties_to_Q(
                Filter(data=group).properties,
                team_id=self.feature_flag.team_id,
                is_person_query=True,
            )
        else:
            # No property filters — the group matches unconditionally.
            condition = RawSQL("true", [])
        persons = persons.annotate(
            **{alias: ExpressionWrapper(condition, output_field=BooleanField())}
        )
        group_aliases.append(alias)
    return list(persons.values_list(*group_aliases))
def tree_downwards(self, root_org_id):
    """
    Return the organisation with the requested ``root_org_id`` together
    with all of its children at any nesting depth.

    TODO: express this filter via the ORM, a RawSQL query or plain Python.

    :type root_org_id: int
    """
    recursive_sql = """
        WITH RECURSIVE children AS (
            SELECT id
            FROM orgunits_organization
            WHERE orgunits_organization.id = %s
            UNION
            SELECT orgunits_organization.id
            FROM orgunits_organization
            INNER JOIN children
                ON orgunits_organization.parent_id = children.id
        )
        SELECT * FROM children
    """
    return self.filter(id__in=RawSQL(recursive_sql, [root_org_id]))
def delete_errored_internal_txs(apps, schema_editor):
    """
    Data migration: previously all traces were stored, even the errored
    ones. Delete the old errored traces still present in the database.

    :param apps:
    :param schema_editor:
    :return:
    """
    InternalTx = apps.get_model("history", "InternalTx")
    # Django bug workaround: RawSQL on the column stands in for
    # child_trace_address=OuterRef('trace_address').
    errored_parents = InternalTx.objects.annotate(
        child_trace_address=RawSQL(
            '"history_internaltx"."trace_address"', tuple())
    ).filter(
        child_trace_address__startswith=F("trace_address"),
        ethereum_tx=OuterRef("ethereum_tx"),
    ).exclude(error=None)
    # Keep rows without an errored parent; delete the rest.
    InternalTx.objects.annotate(
        parent_errored=Subquery(errored_parents.values("pk")[:1])
    ).exclude(parent_errored=None).delete()
def get_tags_repr_queryset(cls, queryset):
    """
    Annotate each forest row with ``customer_tags_repr``: a comma-joined
    string of all distinct ``key:value`` tags of its customers.
    """
    queryset = super().get_tags_repr_queryset(queryset)
    # Correlated subquery: unnest every customer's jsonb tags for the
    # current forest, deduplicate them and join into a single string.
    tags_sql = """select full_tag from (
        select A2.forest_id,
               array_to_string(array_agg(distinct A2.full_tag), ',') as full_tag
        from (
            select A1.forest_id, unnest(A1.full_tag) as full_tag
            from (
                select fc.forest_id,
                       (select array_agg(concat_ws(':', KEY, value)) AS full_tag
                        FROM jsonb_each_text(c.tags)
                        WHERE value IS NOT NULL)
                from crm_forestcustomer fc
                join crm_customer c on c.id = fc.customer_id
                where c.tags != '{}'::jsonb and forest_id = crm_forest.id
            ) A1
        ) A2
        group by A2.forest_id
    ) A3
    where forest_id = crm_forest.id"""
    return queryset.annotate(
        customer_tags_repr=RawSQL(tags_sql, params=[]),
    )
def get_compare_tests_aggregate_data(test_id,
                                     num_of_tests,
                                     order='-test__start_time',
                                     source='default'):
    """
    Compare the given test with up to ``num_of_tests`` earlier tests of the
    same project, returning per-test aggregate response-time data.

    :param test_id: id of the reference test
    :param num_of_tests: number of tests (newest first) to include
    :param order: ordering applied to the resulting queryset
    :param source: 'default' for locally stored data, 'graphite' for data
        pulled from graphite (fetched on demand if missing)
    """
    project = Test.objects.filter(id=test_id).values('project_id')
    start_time = Test.objects.filter(
        id=test_id).values('start_time')[0]['start_time']
    project_id = project[0]['project_id']
    if source == 'default':
        # Weighted average/median: each row's value is weighted by its
        # sample count, then divided by the total count.
        data = TestData.objects. \
            filter(test__start_time__lte=start_time,
                   test__project_id=project_id,
                   test__show=True,
                   source=source,
                   data_resolution_id=1). \
            annotate(display_name=F('test__display_name')). \
            annotate(start_time=F('test__start_time')). \
            values('display_name', 'start_time'). \
            annotate(average=Sum(
                RawSQL("((data->>%s)::numeric)", ('avg',)) *
                RawSQL("((data->>%s)::numeric)", ('count',))) /
                Sum(RawSQL("((data->>%s)::numeric)", ('count',)))). \
            annotate(median=Sum(
                RawSQL("((data->>%s)::numeric)", ('median',)) *
                RawSQL("((data->>%s)::numeric)", ('count',))) /
                Sum(RawSQL("((data->>%s)::numeric)", ('count',)))). \
            order_by(order)[:int(num_of_tests)]
    elif source == 'graphite':
        tests = Test.objects.filter(
            start_time__lte=start_time, project_id=project_id,
            show=True).values().order_by('-start_time')[:int(num_of_tests)]
        for t in tests:
            test_id = t['id']
            # Fetch graphite data lazily for tests that do not have it yet
            # (call for its side effect; the return value is not used).
            if not ServerMonitoringData.objects.filter(
                    test_id=test_id, source='graphite',
                    data_resolution_id=1).exists() \
                    or not TestData.objects.filter(
                        test_id=test_id, source='graphite',
                        data_resolution_id=1).exists():
                update_test_graphite_data(test_id)
        # Bugfix: order_by(order) was applied twice in a row; once is enough.
        data = TestData.objects. \
            filter(test__start_time__lte=start_time,
                   test__project_id=project_id,
                   test__show=True,
                   source=source,
                   data_resolution_id=1). \
            annotate(display_name=F('test__display_name')). \
            annotate(start_time=F('test__start_time')). \
            values('display_name', 'start_time'). \
            annotate(average=Avg(RawSQL("((data->>%s)::numeric)", ('avg',)))). \
            annotate(median=Avg(RawSQL("((data->>%s)::numeric)", ('median',)))). \
            order_by(order)[:int(num_of_tests)]
    return data
def get_compare_tests_server_monitoring_data(test_id,
                                             num_of_tests,
                                             order='-test__start_time'):
    '''Return cpu load data for N tests before the current one'''
    project_id = Test.objects.filter(
        id=test_id).values('project_id')[0]['project_id']
    start_time = Test.objects.filter(
        id=test_id).values('start_time')[0]['start_time']
    # CPU load = user + iowait + system, pulled out of the JSON payload.
    cpu_load_expr = RawSQL(
        "((data->>%s)::float)+((data->>%s)::float)+((data->>%s)::float)", (
            'CPU_user',
            'CPU_iowait',
            'CPU_system',
        ))
    rows = ServerMonitoringData.objects.filter(
        test__start_time__lte=start_time,
        test__project_id=project_id,
        test__show=True,
        data_resolution_id=1,
    ).values('test__display_name', 'server__server_name', 'test__start_time')
    # NOTE(review): cpu_load is annotated twice (raw expression, then its
    # Avg over the values() group) — presumably intentional aggregation;
    # confirm this is accepted on the Django version in use. Also note
    # num_of_tests is not applied here — verify whether slicing was intended.
    rows = rows.annotate(cpu_load=cpu_load_expr).annotate(
        cpu_load=Avg('cpu_load'))
    return rows.order_by('-test__start_time')
def get_queryset(self):
    """
    For the 'list' action return contract rows filtered by the optional
    ``state`` / ``byAgent`` query parameters and annotated with the spec
    total and the payments received; otherwise return the plain
    non-deleted contract queryset.
    """
    if self.action != 'list':
        return Contract.objects.filter(deleted=False)
    subquery_sql = "select sum(pay_value) from lighthouse_payment where id_contract_id=lighthouse_contract.id"
    param_state = int(
        self.request.GET.get('state', CONTRACT_STATE_UNDEFINED))
    param_agent = self.request.GET.get('byAgent', None)
    queryset = Contract.objects.filter(deleted=False)
    if param_state != CONTRACT_STATE_UNDEFINED:
        queryset = queryset.filter(contract_state=int(param_state))
    if param_agent:
        # Show the manager's own contracts plus the contracts explicitly
        # shared with them (viewing rights granted, not yet expired).
        shared_ids = EmployeeContractAccess.objects \
            .filter(id_employee_id=param_agent) \
            .filter(Q(to_date__gte=datetime.today()) | Q(to_date__isnull=True)) \
            .values_list('id_contract__id', flat=True)
        queryset = queryset.filter(
            Q(id__in=shared_ids) | Q(id_agent__id=param_agent))
    return queryset.values(
        'id', 'num', 'id_client__clientname', 'contract_date',
        'est_delivery', 'contract_state', 'id_agent__fio',
    ).annotate(
        sum=Sum(F('specs__item_price') * F('specs__item_count'))
    ).annotate(payed=RawSQL(subquery_sql, ()))
def action_rtot(request, test_id, action_id):
    """
    Return response-time-over-time JSON for one action of a test:
    timestamp offset, average, median, and per-second rps/error rates.
    """
    def _num(key):
        # Numeric value extracted from the JSON ``data`` column.
        return RawSQL("((data->>%s)::numeric)", (key,))

    base = TestActionData.objects.filter(test_id=test_id,
                                         action_id=action_id)
    # Earliest timestamp, used to rebase every row to a relative offset.
    min_timestamp = base. \
        values("test_id", "action_id"). \
        aggregate(min_timestamp=Min(
            RawSQL("((data->>%s)::timestamp)",
                   ('timestamp',))))['min_timestamp']
    rows = base. \
        annotate(timestamp=(RawSQL("((data->>%s)::timestamp)",
                                   ('timestamp',)) - min_timestamp)). \
        annotate(average=_num('avg')). \
        annotate(median=_num('median')). \
        annotate(rps=_num('count') / 60). \
        annotate(errors=_num('errors') / 60). \
        values('timestamp', "average", "median", "rps", "errors"). \
        order_by('timestamp')
    # Round-trip through json to coerce non-serialisable values via str().
    data = json.loads(
        json.dumps(list(rows), indent=4, sort_keys=True, default=str))
    return JsonResponse(data, safe=False)
def categories_changed_handler(sender, **kwargs):
    """
    Handler for the "visible product count changed" event of a category
    and its subcategories. Refreshes the product counters stored on each
    affected category.
    """
    categories = kwargs.get('categories')
    include_self = kwargs.get('include_self', True)
    if isinstance(categories, ShopCategory):
        # A single category instance.
        categories = ShopCategory.objects.filter(pk=categories.pk)
    elif isinstance(categories, (int, str)):
        # A string or number holding a category ID.
        categories = ShopCategory.objects.filter(pk=categories)
    elif isinstance(categories, (list, tuple, set, ValuesListQuerySet)):
        # A collection of strings/numbers holding category IDs.
        categories = ShopCategory.objects.filter(pk__in=categories)
    elif isinstance(categories, QuerySet) and categories.model is ShopCategory:
        # Already a ShopCategory queryset.
        pass
    else:
        raise TypeError('Invalid categories for signal "categories_changed"')
    # Deepest categories first so parents sum already-updated children.
    ancestor_ids = categories.get_ancestors(
        include_self=include_self).filter(is_visible=True).order_by(
            'tree_id', '-level').values_list('id', flat=True)
    with transaction.atomic():
        for category_id in ancestor_ids:
            # total = own product_count + visible children's totals.
            ShopCategory.objects.filter(pk=category_id).update(
                total_product_count=RawSQL(
                    'SELECT shop_shopcategory.product_count + '
                    'COALESCE(SUM(ssc.total_product_count), 0) '
                    'FROM shop_shopcategory AS ssc '
                    'WHERE ssc.parent_id = shop_shopcategory.id '
                    'AND ssc.is_visible = TRUE', ()))
def process_math(query: QuerySet, entity: Entity) -> QuerySet:
    """
    Annotate ``query`` with a ``count`` column according to the entity's
    math mode (dau, or an aggregate over a numeric event property).
    """
    aggregate_by_math = {"sum": Sum, "avg": Avg, "min": Min, "max": Max}
    if entity.math == "dau":
        # Daily active users: count at most one event per user per day.
        query = query.annotate(count=Count("person_id", distinct=True))
    else:
        aggregate = aggregate_by_math.get(entity.math)
        if aggregate is not None:
            # Aggregate over the chosen event property, cast to a double.
            query = query.annotate(count=aggregate(
                Cast(RawSQL('"posthog_event"."properties"->>%s', (
                    entity.math_property, )),
                    output_field=FloatField())))
            # Skip events whose property is unset or not a number. It may
            # not be ideally clear to the user what events were skipped,
            # but without typing this is safe, cheap, and frictionless.
            query = query.extra(where=[
                'jsonb_typeof("posthog_event"."properties"->%s) = \'number\''
            ], params=[entity.math_property])
    return query
def tree_upwards(self, child_org_id):
    """
    Return the organisation with ``child_org_id`` and all of its
    ancestors, walking ``parent_id`` links recursively.

    Based on
    https://developpaper.com/the-implementation-of-finding-all-child-nodes-by-sql-parent-node/
    """
    recursive_sql = '''
        WITH w1(id, parent_id, level) AS (
            SELECT id, parent_id, 0 AS level
            FROM orgunits_organization
            WHERE id = %s
            UNION ALL
            SELECT orgunits_organization.id,
                   orgunits_organization.parent_id,
                   level + 1
            FROM orgunits_organization
            JOIN w1 ON orgunits_organization.id = w1.parent_id
        )
        SELECT id FROM w1
    '''
    return self.filter(id__in=RawSQL(recursive_sql, [child_org_id]))
def tree_upwards(self, child_org_id):
    """
    Return the organisation with the requested ``child_org_id`` together
    with all of its parents at any nesting depth.

    Recursive RawSQL query, written as part of optimisation experiments.
    """
    parents_sql = """
        WITH RECURSIVE parents AS (
            SELECT id, parent_id
            FROM orgunits_organization
            WHERE id = %s
            UNION ALL
            SELECT o.id, o.parent_id
            FROM orgunits_organization o, parents p
            WHERE o.id = p.parent_id
        )
        SELECT id FROM parents
    """
    return self.filter(id__in=RawSQL(parents_sql, [child_org_id]))
def tree_downwards(self, root_org_id):
    """
    Return the organisation with the requested ``root_org_id`` together
    with all of its children at any nesting depth.

    Recursive RawSQL query, written as part of optimisation experiments.
    """
    children_sql = """
        WITH RECURSIVE children AS (
            SELECT id
            FROM orgunits_organization
            WHERE id = %s
            UNION ALL
            SELECT o.id
            FROM orgunits_organization o, children c
            WHERE o.parent_id = c.id
        )
        SELECT id FROM children
    """
    return self.filter(id__in=RawSQL(children_sql, [root_org_id]))
def test_contains_contained_by_with_key_transform(self):
    """contains/contained_by lookups combined with key transforms."""
    cases = [
        ('value__d__contains', 'e'),
        ('value__d__contains', [{'f': 'g'}]),
        ('value__contains', KeyTransform('bax', 'value')),
        ('value__contains', F('value__bax')),
        ('value__baz__contains', {'a': 'b'}),
        ('value__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}),
        (
            'value__contained_by',
            KeyTransform(
                'x',
                RawSQL(
                    self.raw_sql,
                    ['{"x": {"a": "b", "c": 1, "d": "e"}}'],
                ),
            ),
        ),
    ]
    # Some backends match {'f': 'g'} (without surrounding []) against
    # [{'f': 'g'}]; only add that case where the feature flag allows it.
    if not connection.features.json_key_contains_list_matching_requires_list:
        cases.append(('value__d__contains', {'f': 'g'}))
    for lookup, value in cases:
        with self.subTest(lookup=lookup, value=value):
            self.assertIs(
                NullableJSONModel.objects.filter(**{lookup: value}).exists(),
                True,
            )
def lock_first(self, relation):
    """
    Yield the first row of this queryset after taking a PostgreSQL
    advisory lock keyed on (related table OID, FK value); the lock is
    released in the ``finally`` block once the consumer is done.

    :param relation: name of a relational field on the model; a
        ``ValueError`` is raised for non-relational fields.

    NOTE(review): generator-style context manager — presumably consumed
    via ``contextlib.contextmanager`` or an explicit next()/close()
    protocol; confirm at the call sites.
    """
    item = None
    field = self.model._meta.get_field(relation)
    if not field.is_relation:
        raise ValueError('Field "{}" of "{}" is not a relation'.format(
            relation, self.model))
    try:
        # Annotate the first row with lock_acquired using the class's
        # LOCK_ACQUIRED SQL template, parameterised with the related
        # model's table name.
        item = type(self)(self.model, using=self.db).select_related(
            relation).filter(id__in=self.values('id')[:1]).annotate(
                lock_acquired=RawSQL(self.LOCK_ACQUIRED.format(
                    field), [field.related_model._meta.db_table])).first()
        yield item
    finally:
        # Only unlock when a row was found and the lock was acquired.
        if item and item.lock_acquired:
            with connections[self.db].cursor() as cursor:
                cursor.execute(
                    'SELECT pg_advisory_unlock(%s::REGCLASS::INTEGER, %s)',
                    [
                        field.related_model._meta.db_table,
                        getattr(item, field.attname)
                    ])
class AggregateListView(generic.ObjectListView):
    """List view for aggregates, each annotated with its child prefix count."""
    queryset = Aggregate.objects.annotate(child_count=RawSQL(
        'SELECT COUNT(*) FROM ipam_prefix WHERE ipam_prefix.prefix <<= ipam_aggregate.prefix',
        ()))
    filterset = filters.AggregateFilterSet
    filterset_form = forms.AggregateFilterForm
    table = tables.AggregateDetailTable
    template_name = 'ipam/aggregate_list.html'

    def extra_context(self):
        """Tally the total aggregate address space per IP version."""
        ipv4_total, ipv6_total = 0, 0
        for aggregate in self.queryset:
            if aggregate.prefix.version != 6:
                ipv4_total += aggregate.prefix.size
            else:
                # Report equivalent /64s for IPv6 to keep things sane
                ipv6_total += int(aggregate.prefix.size / 2 ** 64)
        return {
            'ipv4_total': ipv4_total,
            'ipv6_total': ipv6_total,
        }
def get_aggregate_total(query: QuerySet, entity: Entity) -> int:
    """
    Compute the aggregate total of ``query`` according to the entity's
    math mode: distinct-person count for dau, a SQL aggregate over a
    numeric event property otherwise, falling back to the row count.
    """
    if entity.math == "dau":
        # Wrap the queryset's SQL and count distinct persons over it.
        inner_sql, inner_params = query.query.sql_with_params()
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT count(DISTINCT person_id) FROM ({}) as aggregates".format(inner_sql),
                inner_params)
            return cursor.fetchall()[0][0]
    if entity.math in MATH_TO_AGGREGATE_FUNCTION:
        query = query.annotate(
            math_prop=Cast(
                RawSQL('"posthog_event"."properties"->>%s', (entity.math_property,)),
                output_field=FloatField(),
            )
        )
        # Guard against non-numeric JSON values for the math property.
        query = query.extra(
            where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
            params=[entity.math_property],
        )
        inner_sql, inner_params = query.query.sql_with_params()
        with connection.cursor() as cursor:
            agg_func = MATH_TO_AGGREGATE_STRING[entity.math].format(math_prop="math_prop")
            cursor.execute(
                "SELECT {} FROM ({}) as aggregates".format(agg_func, inner_sql),
                inner_params)
            return cursor.fetchall()[0][0]
    return len(query)
def get_facets(self):
    """
    Build value counts for each requested facet, handling JSONField
    sub-paths (``field__a__b``) through a RawSQL annotation.
    """
    facets = self.request.query_params.getlist("facets", [])
    out_facets = {}
    for facet in facets:
        # JSONField detection
        # see: https://stackoverflow.com/questions/34325096/how-to-aggregate-min-max-etc-over-django-jsonfield-data
        # see: https://code.djangoproject.com/ticket/25828
        pieces = facet.split("__")
        json_field = False
        if len(pieces) > 1:
            try:
                field_instance = self.facets_queryset.model._meta.get_field(
                    pieces[0])
            # NOTE(review): _meta.get_field normally raises
            # FieldDoesNotExist, not FieldError — confirm this except
            # clause actually triggers for unknown field names.
            except FieldError:
                continue
            json_field = isinstance(field_instance, JSONField)
        if json_field:
            # Build "(field->%s->%s...)" with one placeholder per sub-key.
            rawsql_field = "(%s)" % (
                pieces[0] + "->%s" * (len(pieces) - 1))
            annotate_dict = {facet: RawSQL(rawsql_field, pieces[1:])}
            counts = self.facets_queryset.annotate(**annotate_dict)\
                .values(facet).annotate(count=Count(facet)).order_by(facet)
        else:
            counts = self.facets_queryset.values(facet).annotate(
                count=Count(facet)).order_by(facet)
        out_facets[facet] = counts
    return out_facets
def get_ordering(self, request, queryset, view):
    """
    Extend default ordering with JSON-nested fields: ``field__a__b``
    orders on lower(field->>'a'->>'b') via a RawSQL OrderBy.
    """
    ordering = super().get_ordering(request, queryset, view) or []
    params = request.query_params.get(self.ordering_param)
    if params:
        for raw_field in [p.strip() for p in params.split(',')]:
            json_nested = raw_field.split('__')
            model_field = json_nested.pop(0)
            descending = False
            # Skip duplicates and fields not authorized for ordering.
            if (model_field in ordering
                    or not self.remove_invalid_fields(
                        queryset, [model_field], view, request)):
                continue
            if model_field.startswith('-'):
                descending = True
                model_field = model_field[1:]
            # One '->>%s' per nested JSON key.
            tpl = model_field + '->>%s' * len(json_nested)
            ordering.append(
                OrderBy(RawSQL('lower({})'.format(tpl), json_nested),
                        descending=descending))
    return ordering or None
def _build_filename_contains_raw_query(field, value):
    # Django's ORM cannot ask "does any item in a JSONB list have a key
    # which contains a given value". The closest it offers is:
    #   queryset.filter(your_json_field__contains=[{"key": "value"}])
    # but that is an exact match, so [{"key": "value_etc"}] would not
    # match. We therefore fall back to RawSQL and PostgreSQL's built-in
    # JSONB array functions to 'iterate' over the list, running `like`
    # against each element's key.
    exists_sql = f"""
        EXISTS(
            SELECT attached_files ->> 'visible_name'
            FROM JSONB_ARRAY_ELEMENTS("field_{field.id}") as attached_files
            WHERE UPPER(attached_files ->> 'visible_name') LIKE UPPER(%s)
        )
    """
    return RawSQL(
        exists_sql,
        params=[f"%{value}%"],
        output_field=BooleanField(),
    )
def basequeryset(reportclass, request, *args, **kwargs):
    """
    Build the base Demand queryset, narrowed by the optional ``item``,
    ``location``, ``customer`` and ``status_in`` request parameters
    (hierarchy filters use the nested-set lft/rght bounds), annotated
    with the planned-short quantity.
    """
    q = Demand.objects.all()
    if "item" in request.GET:
        item = Item.objects.using(
            request.database).get(name__exact=unquote(request.GET["item"]))
        q = q.filter(item__lft__gte=item.lft, item__lft__lt=item.rght)
    if "location" in request.GET:
        location = Location.objects.using(request.database).get(
            name__exact=unquote(request.GET["location"]))
        q = q.filter(location__lft__gte=location.lft,
                     location__lft__lt=location.rght)
    if "customer" in request.GET:
        customer = Customer.objects.using(request.database).get(
            name__exact=unquote(request.GET["customer"]))
        # Bugfix: use the customer__lft relation lookup (double
        # underscore), consistent with the item and location filters above;
        # the previous single-underscore "customer_lft" is not a valid
        # relation traversal.
        q = q.filter(customer__lft__gte=customer.lft,
                     customer__lft__lt=customer.rght)
    if "status_in" in request.GET:
        status = unquote(request.GET["status_in"])
        q = q.filter(status__in=status.split(","))
    return q.annotate(
        plannedshort=RawSQL("quantity - plannedquantity", []))
def leaderboard(request, challenge_phase_split_id):
    """
    Return the leaderboard for a corresponding Challenge Phase Split.

    Entries are ranked by the schema's ``default_order_by`` metric, one
    (best) row per team; host-team submissions are excluded on public
    phases and private leaderboards are only shown to challenge hosts.
    """
    # check if the challenge phase split exists or not
    try:
        challenge_phase_split = ChallengePhaseSplit.objects.get(
            pk=challenge_phase_split_id)
    except ChallengePhaseSplit.DoesNotExist:
        response_data = {'error': 'Challenge Phase Split does not exist'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard.
    # Bugfix: catch the specific failures (missing key, or schema that is
    # not a mapping) instead of a bare ``except``, which also swallowed
    # SystemExit/KeyboardInterrupt.
    try:
        default_order_by = leaderboard.schema['default_order_by']
    except (KeyError, TypeError):
        response_data = {'error': 'Sorry, Default filtering key not found in leaderboard schema!'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_obj = challenge_phase_split.challenge_phase.challenge
    challenge_hosts_emails = challenge_obj.creator.get_all_challenge_host_email()
    is_challenge_phase_public = challenge_phase_split.challenge_phase.is_public
    # Exclude the submissions from challenge host team to be displayed on the leaderboard of public phases
    challenge_hosts_emails = [] if not is_challenge_phase_public else challenge_hosts_emails
    leaderboard_data = LeaderboardData.objects.exclude(
        submission__created_by__email__in=challenge_hosts_emails)

    # Get all the successful submissions related to the challenge phase split
    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_public=True,
        submission__is_flagged=False,
        submission__status=Submission.FINISHED).order_by('created_at')

    # Extract the ranking metric from the JSON result for sorting.
    leaderboard_data = leaderboard_data.annotate(
        filtering_score=RawSQL('result->>%s', (default_order_by, ),
                               output_field=FloatField())).values(
        'id', 'submission__participant_team__team_name',
        'challenge_phase_split', 'result', 'filtering_score',
        'leaderboard__schema', 'submission__submitted_at')

    sorted_leaderboard_data = sorted(
        leaderboard_data,
        key=lambda k: float(k['filtering_score']),
        reverse=True)

    # Keep only the best (first) entry per team.
    distinct_sorted_leaderboard_data = []
    team_list = []
    for data in sorted_leaderboard_data:
        if data['submission__participant_team__team_name'] in team_list:
            continue
        distinct_sorted_leaderboard_data.append(data)
        team_list.append(data['submission__participant_team__team_name'])

    # Project the result values into the order declared by the schema labels.
    leaderboard_labels = challenge_phase_split.leaderboard.schema['labels']
    for item in distinct_sorted_leaderboard_data:
        item['result'] = [item['result'][index] for index in leaderboard_labels]

    paginator, result_page = paginated_queryset(
        distinct_sorted_leaderboard_data,
        request,
        pagination_class=StandardResultSetPagination())

    challenge_host_user = is_user_a_host_of_challenge(
        request.user, challenge_obj.pk)

    # Show the Private leaderboard only if the user is a challenge host
    if challenge_host_user:
        response_data = result_page
        return paginator.get_paginated_response(response_data)
    # Check if challenge phase leaderboard is public for participant user or not
    elif challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC:
        response_data = {'error': 'Sorry, the leaderboard is not public!'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
    else:
        response_data = result_page
        return paginator.get_paginated_response(response_data)
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
    """
    DRY routine for calling a spatial stored procedure on a geometry
    column and attaching its output as an attribute of the model.

    Arguments:
     att:
      The name of the spatial attribute that holds the spatial
      SQL function to call.

     settings:
      Dictionary of internal settings to customize for the spatial procedure.

    Public Keyword Arguments:

     field_name:
      The name of the geographic field to call the spatial
      function on.  May also be a lookup to a geometry field
      as part of a foreign key relation.

     model_att:
      The name of the model attribute to attach the output of
      the spatial function to.
    """
    warnings.warn(
        "The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
        "documentation to find the expression-based replacement." % att,
        RemovedInDjango20Warning, stacklevel=2
    )
    # Default settings.
    settings.setdefault('desc', None)
    settings.setdefault('geom_args', ())
    settings.setdefault('geom_field', None)
    settings.setdefault('procedure_args', {})
    settings.setdefault('procedure_fmt', '%(geo_col)s')
    settings.setdefault('select_params', [])

    connection = connections[self.db]

    # Performing setup for the spatial column, unless told not to.
    if settings.get('setup', True):
        default_args, geo_field = self._spatial_setup(
            att, desc=settings['desc'], field_name=field_name,
            geo_field_type=settings.get('geo_field_type'))
        for k, v in six.iteritems(default_args):
            settings['procedure_args'].setdefault(k, v)
    else:
        geo_field = settings['geo_field']

    # The attribute to attach to the model.
    if not isinstance(model_att, six.string_types):
        model_att = att

    # Special handling for any argument that is a geometry.
    for name in settings['geom_args']:
        # Using the field's get_placeholder() routine to get any needed
        # transformation SQL.
        geom = geo_field.get_prep_value(settings['procedure_args'][name])
        params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
        geom_placeholder = geo_field.get_placeholder(geom, None, connection)

        # Replacing the procedure format with that of any needed
        # transformation SQL.
        old_fmt = '%%(%s)s' % name
        new_fmt = geom_placeholder % '%%s'
        settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
        settings['select_params'].extend(params)

    # Getting the format for the stored procedure.
    fmt = '%%(function)s(%s)' % settings['procedure_fmt']

    # If the result of this function needs to be converted.
    if settings.get('select_field'):
        select_field = settings['select_field']
        if connection.ops.oracle:
            # Oracle treats empty strings as NULL for this conversion.
            select_field.empty_strings_allowed = False
    else:
        select_field = Field()

    # Finally, setting the extra selection attribute with
    # the format string expanded with the stored procedure
    # arguments, attached under the requested model attribute name.
    self.query.add_annotation(
        RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
        model_att)
    return self
def test_rawsql(self):
    # A RawSQL __in filter must work together with query caching.
    cached_posts = Post.objects.cache()
    cached_posts.filter(category__in=RawSQL("select 1", ())).count()
def standings(request, title_slug=None, contest_id=None, template='standings.html', extra_context=None):
    """
    Render the standings page for a single contest.

    The contest may be addressed by ``contest_id``, by ``title_slug``, or both;
    mismatched/partial URLs are redirected to the canonical
    ``/<title-slug>/<contest_id>/`` form.  The statistics queryset is then
    progressively narrowed by GET parameters (search, country, division,
    per-field selectors) and optionally aggregated when ``groupby`` is set.

    Raises Http404 via HttpResponseNotFound when no matching contest exists.
    """
    context = {}

    # 'none' in the URL is treated the same as an absent groupby parameter.
    groupby = request.GET.get('groupby')
    if groupby == 'none':
        groupby = None

    # An explicitly empty search parameter is stripped from the URL.
    search = request.GET.get('search')
    if search == '':
        url = request.get_full_path()
        url = re.sub('search=&?', '', url)
        url = re.sub(r'\?$', '', url)
        return redirect(url)

    # Normalize orderby: drop duplicates (keeping the last occurrence of each
    # field, ignoring a leading '-') and remove '--'-prefixed entries.
    # '--' alone clears the ordering.  If normalization changed anything,
    # redirect to the canonical query string.
    orderby = request.GET.getlist('orderby')
    if orderby:
        if '--' in orderby:
            updated_orderby = []
        else:
            orderby_set = set()
            # set.add returns None, so `not orderby_set.add(k)` is always True;
            # it is used here only for its side effect of recording seen keys.
            unique_orderby = reversed([
                f for k, f in [(f.lstrip('-'), f) for f in reversed(orderby)]
                if k not in orderby_set and not orderby_set.add(k)
            ])
            updated_orderby = [f for f in unique_orderby if not f.startswith('--')]
        if updated_orderby != orderby:
            query = request.GET.copy()
            query.setlist('orderby', updated_orderby)
            return redirect(f'{request.path}?{query.urlencode()}')

    # --- Resolve the contest from contest_id and/or title_slug -------------
    contests = Contest.objects
    to_redirect = False
    contest = None
    if contest_id is not None:
        contest = contests.filter(pk=contest_id).first()
        if title_slug is None:
            to_redirect = True
        else:
            # Slug mismatch: fall back to slug-based lookup with the id
            # appended (some slugs embed the contest id).
            if contest is None or slug(contest.title) != title_slug:
                contest = None
                title_slug += f'-{contest_id}'
    if contest is None and title_slug is not None:
        contests_iterator = contests.filter(slug=title_slug).iterator()
        contest = None
        try:
            contest = next(contests_iterator)
            another = next(contests_iterator)
        except StopIteration:
            another = None
        if contest is None:
            return HttpResponseNotFound()
        if another is None:
            to_redirect = True
        else:
            # Ambiguous slug: send the user to the list view filtered by slug.
            return redirect(reverse('ranking:standings_list') + f'?search=slug:{title_slug}')
    if contest is None:
        return HttpResponseNotFound()
    if to_redirect:
        # Canonicalize the URL, preserving the current query string.
        query = query_transform(request)
        url = reverse('ranking:standings', kwargs={'title_slug': slug(contest.title), 'contest_id': str(contest.pk)})
        if query:
            query = '?' + query
        return redirect(url + query)

    # --- Per-user display preferences --------------------------------------
    # 'detail' in the URL overrides (and persists to) the coder's settings.
    with_detail = request.GET.get('detail', 'true') in ['true', 'on']
    if request.user.is_authenticated:
        coder = request.user.coder
        if 'detail' in request.GET:
            coder.settings['standings_with_detail'] = with_detail
            coder.save()
        else:
            with_detail = coder.settings.get('standings_with_detail', False)
    else:
        coder = None

    with_row_num = False

    contest_fields = list(contest.info.get('fields', []))
    hidden_fields = list(contest.info.get('hidden_fields', []))

    statistics = Statistics.objects.filter(contest=contest)

    options = contest.info.get('standings', {})

    # Ordering: contest-level override, then resource-level, then default.
    # An 'addition__<field>' entry is only honored if the contest actually
    # exposes that field.
    order = None
    resource_standings = contest.resource.info.get('standings', {})
    order = copy.copy(options.get('order', resource_standings.get('order')))
    if order:
        for f in order:
            if f.startswith('addition__') and f.split('__', 1)[1] not in contest_fields:
                order = None
                break
    if order is None:
        order = ['place_as_int', '-solving']

    # fixed fields
    fixed_fields = (
        ('penalty', 'Penalty'),
        ('total_time', 'Time'),
        ('advanced', 'Advance'),
    )
    fixed_fields += tuple(options.get('fixed_fields', []))
    if not with_detail:
        fixed_fields += (('rating_change', 'Rating change'), )

    statistics = statistics \
        .select_related('account') \
        .select_related('account__resource') \
        .prefetch_related('account__coders')

    has_country = ('country' in contest_fields or '_countries' in contest_fields
                   or statistics.filter(account__country__isnull=False).exists())

    division = request.GET.get('division')
    if division == 'any':
        with_row_num = True
        # Demote place_as_int to the last sort key when mixing divisions.
        if 'place_as_int' in order:
            order.remove('place_as_int')
            order.append('place_as_int')
        fixed_fields += (('division', 'Division'), )

    if 'team_id' in contest_fields and not groupby:
        # Collapse duplicate team rows: one row per distinct ordering key.
        order.append('addition__name')
        statistics = statistics.distinct(*[f.lstrip('-') for f in order])

        # host = resource_standings.get('account_team_resource', contest.resource.host)
        # account_team_resource = Resource.objects.get(host=host)
        # context['account_team_resource'] = account_team_resource

        # statistics = statistics.annotate(
        #     accounts=RawSQL(
        #         '''
        #         SELECT array_agg(array[u2.key, u3.rating::text, u3.url])
        #         FROM "ranking_statistics" U0
        #         INNER JOIN "ranking_account" U2
        #         ON (u0."account_id" = u2."id")
        #         INNER JOIN "ranking_account" U3
        #         ON (u2."key" = u3."key" AND u3."resource_id" = %s)
        #         WHERE (
        #             u0."contest_id" = %s
        #             AND ("u0"."addition" -> 'team_id') = ("ranking_statistics"."addition" -> 'team_id')
        #         )
        #         ''',
        #         [account_team_resource.pk, contest.pk]
        #     )
        # )

    order.append('pk')
    statistics = statistics.order_by(*order)

    # Columns to display: only fixed fields the contest actually has.
    fields = OrderedDict()
    for k, v in fixed_fields:
        if k in contest_fields:
            fields[k] = v

    n_highlight_context = _standings_highlight(statistics, options)

    # field to select
    fields_to_select_defaults = {
        'rating': {'options': ['rated', 'unrated'], 'noajax': True, 'nomultiply': True, 'nourl': True},
        'advanced': {'options': ['true', 'false'], 'noajax': True, 'nomultiply': True},
        'highlight': {'options': ['true', 'false'], 'noajax': True, 'nomultiply': True},
    }

    fields_to_select = OrderedDict()
    map_fields_to_select = {'rating_change': 'rating'}
    for f in sorted(contest_fields):
        f = f.strip('_')
        if f.lower() in [
            'institution', 'room', 'affiliation', 'city', 'languages',
            'school', 'class', 'job', 'region', 'rating_change', 'advanced',
            'company', 'language', 'league', 'onsite', 'degree', 'university',
            'list',
        ]:
            f = map_fields_to_select.get(f, f)
            field_to_select = fields_to_select.setdefault(f, {})
            field_to_select['values'] = [v for v in request.GET.getlist(f) if v]
            field_to_select.update(fields_to_select_defaults.get(f, {}))

    if n_highlight_context.get('statistics_ids'):
        f = 'highlight'
        field_to_select = fields_to_select.setdefault(f, {})
        field_to_select['values'] = [v for v in request.GET.getlist(f) if v]
        field_to_select.update(fields_to_select_defaults.get(f, {}))

    # Chat selector is only offered for chats the viewing coder belongs to.
    chats = coder.chats.all() if coder else None
    if chats:
        options_values = {c.chat_id: c.title for c in chats}
        fields_to_select['chat'] = {
            'values': [v for v in request.GET.getlist('chat') if v and v in options_values],
            'options': options_values,
            'noajax': True,
            'nogroupby': True,
            'nourl': True,
        }

    # Extra columns requested explicitly via ?field=... are force-shown.
    hidden_fields_values = [v for v in request.GET.getlist('field') if v]
    for v in hidden_fields_values:
        if v not in hidden_fields:
            hidden_fields.append(v)

    # Decide visibility for every remaining contest field: skip internals and
    # specially-rendered fields; show the rest when in detail mode or when
    # explicitly requested, otherwise tuck them into hidden_fields.
    for k in contest_fields:
        if (k in fields
                or k in ['problems', 'team_id', 'solved', 'hack', 'challenges', 'url', 'participant_type', 'division']
                or k == 'medal' and '_medal_title_field' in contest_fields
                or 'country' in k and k not in hidden_fields_values
                or k in ['name'] and k not in hidden_fields_values
                or k.startswith('_')
                or k in hidden_fields and k not in hidden_fields_values):
            continue
        if with_detail or k in hidden_fields_values:
            fields[k] = k
        else:
            hidden_fields.append(k)

    # Prettify auto-generated column titles ('some_field' -> 'Some Field').
    for k, field in fields.items():
        if k != field:
            continue
        field = ' '.join(k.split('_'))
        if field and not field[0].isupper():
            field = field.title()
        fields[k] = field

    if hidden_fields:
        fields_to_select['field'] = {
            'values': hidden_fields_values,
            'options': hidden_fields,
            'noajax': True,
            'nogroupby': True,
            'nourl': True,
            'nofilter': True,
        }

    per_page = options.get('per_page', 50)
    if per_page is None:
        # Effectively "no pagination".
        per_page = 100500
    elif contest.n_statistics and contest.n_statistics < 500:
        per_page = contest.n_statistics

    # mod_penalty enables the penalty-relative coloring mode; only meaningful
    # for ICPC-style contests (integer penalty, no 'solved'/'time' fields).
    mod_penalty = {}
    first = statistics.first()
    if first and all('time' not in k for k in contest_fields):
        penalty = first.addition.get('penalty')
        if penalty and isinstance(penalty, int) and 'solved' not in first.addition:
            mod_penalty.update({'solving': first.solving, 'penalty': penalty})

    params = {}

    # --- Division handling --------------------------------------------------
    problems = contest.info.get('problems', {})
    if 'division' in problems:
        divisions_order = list(problems.get('divisions_order', sorted(contest.info['problems']['division'].keys())))
    elif 'divisions_order' in contest.info:
        divisions_order = contest.info['divisions_order']
    else:
        divisions_order = []

    if divisions_order:
        divisions_order.append('any')
        if division not in divisions_order:
            division = divisions_order[0]
        params['division'] = division
        if 'division' in problems:
            if division == 'any':
                # Merge problems across divisions by short name, summing the
                # per-division counters.
                _problems = OrderedDict()
                for div in reversed(divisions_order):
                    for p in problems['division'].get(div, []):
                        k = get_problem_short(p)
                        if k not in _problems:
                            _problems[k] = p
                        else:
                            for f in 'n_accepted', 'n_teams', 'n_partial', 'n_total':
                                if f in p:
                                    _problems[k][f] = _problems[k].get(f, 0) + p[f]
                problems = list(_problems.values())
            else:
                problems = problems['division'][division]
        if division != 'any':
            statistics = statistics.filter(addition__division=division)

    # Penalty coloring only applies when every problem is worth exactly 1.
    for p in problems:
        if 'full_score' in p and isinstance(p['full_score'], (int, float)) and abs(p['full_score'] - 1) > 1e-9:
            mod_penalty = {}
            break

    # Merge adjacent problem columns that share a name or group (subtasks)
    # into a single header cell via colspan.
    last = None
    merge_problems = False
    for p in problems:
        if last and (last.get('full_score') or last.get('subname')) and (
            'name' in last and last.get('name') == p.get('name')
            or 'group' in last and last.get('group') == p.get('group')
        ):
            merge_problems = True
            last['colspan'] = last.get('colspan', 1) + 1
            p['skip'] = True
        else:
            last = p
            last['colspan'] = 1

    # own_stat = statistics.filter(account__coders=coder).first() if coder else None

    # filter by search
    search = request.GET.get('search')
    if search:
        with_row_num = True
        if search.startswith('party:'):
            # 'party:<slug>' limits rows to members/admins/author of a party.
            _, party_slug = search.split(':')
            party = get_object_or_404(Party.objects.for_user(request.user), slug=party_slug)
            statistics = statistics.filter(
                Q(account__coders__in=party.coders.all())
                | Q(account__coders__in=party.admins.all())
                | Q(account__coders=party.author))
        else:
            cond = get_iregex_filter(search, 'account__key', 'addition__name', logger=request.logger)
            statistics = statistics.filter(cond)

    # filter by country
    countries = request.GET.getlist('country')
    countries = set([c for c in countries if c])
    if countries:
        with_row_num = True
        cond = Q(account__country__in=countries)
        if 'None' in countries:
            cond |= Q(account__country__isnull=True)
        if '_countries' in contest_fields:
            # Team contests store member countries in addition._countries.
            for code in countries:
                name = get_country_name(code)
                if name:
                    cond |= Q(addition___countries__icontains=name)
        statistics = statistics.filter(cond)
        params['countries'] = countries

    # filter by field to select
    for field, field_to_select in fields_to_select.items():
        values = field_to_select.get('values')
        if not values or field_to_select.get('nofilter'):
            continue
        with_row_num = True
        filt = Q()
        if field == 'languages':
            for lang in values:
                if lang == 'any':
                    filt = Q(**{'addition___languages__isnull': False})
                    break
                filt |= Q(**{'addition___languages__contains': [lang]})
        elif field == 'rating':
            for q in values:
                if q not in field_to_select['options']:
                    continue
                q = q == 'unrated'
                if q:
                    filt |= Q(addition__rating_change__isnull=True) & Q(addition__new_rating__isnull=True)
                else:
                    filt |= Q(addition__rating_change__isnull=False) | Q(addition__new_rating__isnull=False)
        elif field == 'advanced':
            for q in values:
                if q not in field_to_select['options']:
                    continue
                filt |= Q(addition__advanced=q == 'true')
        elif field == 'highlight':
            for q in values:
                if q not in field_to_select['options']:
                    continue
                filt = Q(pk__in=n_highlight_context.get('statistics_ids', {}))
                if q == 'false':
                    filt = ~filt
        elif field == 'chat':
            for q in values:
                if q not in field_to_select['options']:
                    continue
                chat = Chat.objects.filter(chat_id=q, is_group=True).first()
                if chat:
                    filt |= Q(account__coders__in=chat.coders.all())
            # subquery = Chat.objects.filter(coder=OuterRef('account__coders'), is_group=False).values('name')[:1]
            # statistics = statistics.annotate(chat_name=Subquery(subquery))
        else:
            # Generic JSON field filter: compare as text, with 'None' meaning
            # the key is absent/null.
            query_field = f'addition__{field}'
            statistics = statistics.annotate(**{
                f'{query_field}_str': Cast(JSONF(query_field), models.TextField())
            })
            for q in values:
                if q == 'None':
                    filt |= Q(**{f'{query_field}__isnull': True})
                else:
                    filt |= Q(**{f'{query_field}_str': q})
        statistics = statistics.filter(filt)

    # groupby
    if groupby == 'country' or groupby in fields_to_select:
        statistics = statistics.order_by('pk')

        participants_info = n_highlight_context.get('participants_info')
        n_highlight = options.get('n_highlight')
        advanced_by_participants_info = participants_info and n_highlight and groupby != 'languages'

        # Aggregated view replaces the per-row columns entirely.
        fields = OrderedDict()
        fields['groupby'] = groupby.title()
        fields['n_accounts'] = 'Num'
        fields['avg_score'] = 'Avg'
        medals = {m['name']: m for m in options.get('medals', [])}
        if 'medal' in contest_fields:
            for medal in settings.ORDERED_MEDALS_:
                fields[f'n_{medal}'] = medals.get(medal, {}).get('value', medal[0].upper())
        if 'advanced' in contest_fields or advanced_by_participants_info:
            fields['n_advanced'] = 'Adv'

        orderby = [f for f in orderby if f.lstrip('-') in fields] or ['-n_accounts', '-avg_score']

        if groupby == 'languages':
            # Per-language stats cannot be expressed as a single ORM query:
            # build one queryset per problem (extracting language and score
            # from the JSON addition), UNION them, and later splice the
            # resulting SQL into the aggregate query by hand.
            _, before_params = statistics.query.sql_with_params()
            querysets = []
            for problem in problems:
                key = get_problem_short(problem)
                field = f'addition__problems__{key}__language'
                score = f'addition__problems__{key}__result'
                qs = statistics \
                    .filter(**{f'{field}__isnull': False, f'{score}__isnull': False}) \
                    .annotate(language=Cast(JSONF(field), models.TextField())) \
                    .annotate(score=Case(
                        When(**{f'{score}__startswith': '+'}, then=1),
                        When(**{f'{score}__startswith': '-'}, then=0),
                        When(**{f'{score}__startswith': '?'}, then=0),
                        default=Cast(JSONF(score), models.FloatField()),
                        output_field=models.FloatField(),
                    )) \
                    .annotate(sid=F('pk'))
                querysets.append(qs)
            merge_statistics = querysets[0].union(*querysets[1:], all=True)
            language_query, language_params = merge_statistics.query.sql_with_params()
            field = 'solving'
            statistics = statistics.annotate(groupby=F(field))
        elif groupby == 'rating':
            statistics = statistics.annotate(groupby=Case(
                When(addition__rating_change__isnull=False, then=Value('Rated')),
                default=Value('Unrated'),
                output_field=models.TextField(),
            ))
        elif groupby == 'country':
            if '_countries' in contest_fields:
                # Explode the per-member country array so each country counts.
                statistics = statistics.annotate(country=RawSQL(
                    '''json_array_elements((("addition" ->> '_countries'))::json)::jsonb''', []))
                field = 'country'
            else:
                field = 'account__country'
            statistics = statistics.annotate(groupby=F(field))
        else:
            # Group by an arbitrary addition.<field>, cast per declared type.
            field = f'addition__{groupby}'
            types = contest.info.get('fields_types', {}).get(groupby, [])
            if 'int' in types:
                field_type = models.IntegerField()
            elif 'float' in types:
                field_type = models.FloatField()
            else:
                field_type = models.TextField()
            statistics = statistics.annotate(groupby=Cast(JSONF(field), field_type))

        statistics = statistics.order_by('groupby')
        statistics = statistics.values('groupby')
        statistics = statistics.annotate(n_accounts=Count('id'))
        statistics = statistics.annotate(avg_score=Avg('solving'))

        if 'medal' in contest_fields:
            for medal in settings.ORDERED_MEDALS_:
                n_medal = f'n_{medal}'
                statistics = statistics.annotate(**{
                    f'{n_medal}': Count(Case(When(addition__medal__iexact=medal, then=1)))
                })

        if 'advanced' in contest_fields:
            statistics = statistics.annotate(n_advanced=Count(
                Case(
                    When(addition__advanced=True, then=1),
                    When(~Q(addition__advanced=False) & ~Q(addition__advanced=''), then=1),
                )))
        elif advanced_by_participants_info:
            pks = list()
            for pk, info in participants_info.items():
                if 'n' not in info or info['n'] > info.get('n_highlight', n_highlight):
                    continue
                pks.append(pk)
            statistics = statistics.annotate(n_advanced=Count(Case(When(pk__in=set(pks), then=1))))

        statistics = statistics.order_by(*orderby)

        if groupby == 'languages':
            # Textual surgery on the ORM-generated SQL: redirect the grouping
            # and aggregates to the unioned per-problem subquery built above.
            # Fragile by nature — tied to the exact SQL Django emits.
            query, sql_params = statistics.query.sql_with_params()
            query = query.replace(f'"ranking_statistics"."{field}" AS "groupby"', '"language" AS "groupby"')
            query = query.replace(f'GROUP BY "ranking_statistics"."{field}"', 'GROUP BY "language"')
            query = query.replace('"ranking_statistics".', '')
            query = query.replace('AVG("solving") AS "avg_score"', 'AVG("score") AS "avg_score"')
            query = query.replace('COUNT("id") AS "n_accounts"', 'COUNT("sid") AS "n_accounts"')
            query = re.sub('FROM "ranking_statistics".*GROUP BY', f'FROM ({language_query}) t1 GROUP BY', query)
            sql_params = sql_params[:-len(before_params)] + language_params
            with connection.cursor() as cursor:
                cursor.execute(query, sql_params)
                columns = [col[0] for col in cursor.description]
                statistics = [dict(zip(columns, row)) for row in cursor.fetchall()]
                statistics = ListAsQueryset(statistics)
            problems = []

        labels_groupby = {
            'n_accounts': 'Number of participants',
            'avg_score': 'Average score',
            'n_advanced': 'Number of advanced',
        }
        for medal in settings.ORDERED_MEDALS_:
            labels_groupby[f'n_{medal}'] = 'Number of ' + medals.get(medal, {}).get('value', medal)
        num_rows_groupby = statistics.count()
        map_colors_groupby = {s['groupby']: idx for idx, s in enumerate(statistics)}
    else:
        groupby = 'none'
        labels_groupby = None
        num_rows_groupby = None
        map_colors_groupby = None

    my_statistics = []
    if groupby == 'none' and coder:
        statistics = statistics.annotate(my_stat=SubqueryExists('account__coders', filter=Q(coder=coder)))
        my_statistics = statistics.filter(account__coders=coder).extra(select={'floating': True})

    context.update({
        'standings_options': options,
        'mod_penalty': mod_penalty,
        'colored_by_group_score': mod_penalty or options.get('colored_by_group_score'),
        'contest': contest,
        'statistics': statistics,
        'my_statistics': my_statistics,
        'problems': problems,
        'params': params,
        'fields': fields,
        'fields_types': contest.info.get('fields_types', {}),
        'divisions_order': divisions_order,
        'has_country': has_country,
        'per_page': per_page,
        'with_row_num': with_row_num,
        'merge_problems': merge_problems,
        'fields_to_select': fields_to_select,
        'truncatechars_name_problem': 10 * (2 if merge_problems else 1),
        'with_detail': with_detail,
        'groupby': groupby,
        'pie_limit_rows_groupby': 50,
        'labels_groupby': labels_groupby,
        'num_rows_groupby': num_rows_groupby,
        'map_colors_groupby': map_colors_groupby,
        'advance': contest.info.get('advance'),
        'timezone': get_timezone(request),
        'timeformat': get_timeformat(request),
        'with_neighbors': request.GET.get('neighbors') == 'on',
        'with_table_inner_scroll': not request.user_agent.is_mobile,
    })
    context.update(n_highlight_context)

    if extra_context is not None:
        context.update(extra_context)

    return render(request, template, context)
def add_i18n_name(self, language): queryset = self._clone() alias = unique_together_left_join(queryset, ProblemTranslation, 'problem', 'language', language) return queryset.annotate(i18n_name=RawSQL('%s.name' % alias, ()))
def _get_model_status(self, master_link, apps, fixtures, reports, keywords):
    """
    Build the sync-status rows for every model linked from ``master_link``.

    For each (model, model_detail) pair, only the most recent
    DomainLinkHistory action is reported.  ``apps``, ``fixtures``,
    ``reports`` and ``keywords`` are dicts of already-fetched objects keyed
    by id/tag; entries consumed here are popped so that the trailing
    _get_master_model_status call only reports never-synced items.

    :param master_link: the DomainLink to report on (falsy -> empty list)
    :returns: list of dicts with 'type', 'name', 'last_update', 'detail',
              'can_update' keys
    """
    model_status = []
    if not master_link:
        return model_status

    models_seen = set()
    # Window function ranks actions per (model, model_detail), newest first.
    history = DomainLinkHistory.objects.filter(link=master_link).annotate(row_number=RawSQL(
        'row_number() OVER (PARTITION BY model, model_detail ORDER BY date DESC)',
        []
    ))
    linked_models = dict(LINKED_MODELS)
    timezone = get_timezone_for_request()
    for action in history:
        models_seen.add(action.model)
        if action.row_number != 1:
            # first row is the most recent
            continue
        name = linked_models[action.model]
        update = {
            'type': action.model,
            'name': name,
            'last_update': server_to_user_time(action.date, timezone),
            'detail': action.model_detail,
            'can_update': True
        }
        # The branches below are mutually exclusive on action.model, so use
        # elif to make that explicit.
        if action.model == 'app':
            app_name = ugettext('Unknown App')
            if action.model_detail:
                detail = action.wrapped_detail
                app = apps.pop(detail.app_id, None)
                app_name = app.name if app else detail.app_id
                if app:
                    update['detail'] = action.model_detail
                else:
                    update['can_update'] = False
            else:
                update['can_update'] = False
            update['name'] = '{} ({})'.format(name, app_name)
        elif action.model == 'fixture':
            tag_name = ugettext('Unknown Table')
            can_update = False
            if action.model_detail:
                detail = action.wrapped_detail
                tag = action.wrapped_detail.tag
                # pop() both consumes the prefetched entry and raises KeyError
                # when it is absent (previously done with get() + del, which
                # relied on the del to raise).
                try:
                    fixture = fixtures.pop(tag)
                except KeyError:
                    fixture = get_fixture_data_type_by_tag(self.domain, tag)
                tag_name = fixture.tag
                can_update = fixture.is_global
            update['name'] = f'{name} ({tag_name})'
            update['can_update'] = can_update
        elif action.model == 'report':
            report_id = action.wrapped_detail.report_id
            try:
                report = reports.pop(report_id)
            except KeyError:
                report = ReportConfiguration.get(report_id)
            update['name'] = f'{name} ({report.title})'
        elif action.model == 'keyword':
            keyword_id = action.wrapped_detail.linked_keyword_id
            try:
                keyword = keywords.pop(keyword_id).keyword
            except KeyError:
                try:
                    keyword = Keyword.objects.get(id=keyword_id).keyword
                except Keyword.DoesNotExist:
                    keyword = ugettext_lazy("Deleted Keyword")
                    update['can_update'] = False
            update['name'] = f'{name} ({keyword})'
        model_status.append(update)

    # Add in models and apps that have never been synced
    model_status.extend(
        self._get_master_model_status(apps, fixtures, reports, keywords, ignore_models=models_seen))

    return model_status