def as_sql(self, compiler, connection):
    if connection.ops.oracle:
        self.output_field = AreaField('sq_m')  # Oracle returns area in units of meters.
    elif connection.ops.geography:
        # Geography fields support area calculation, returns square meters.
        self.output_field = AreaField('sq_m')
    elif not self.output_field.geodetic(connection):
        # Getting the area units of the geographic field.
        units = self.output_field.units_name(connection)
        if units:
            self.output_field = AreaField(AreaMeasure.unit_attname(units))
        else:
            self.output_field = FloatField()
    else:
        # TODO: Do we want to support raw number areas for geodetic fields?
        raise NotImplementedError('Area on geodetic coordinate systems not supported.')
    return super(Area, self).as_sql(compiler, connection)

def get(self, request, id):
    invoice_object = Invoice.objects.get(id=id)
    invoicem_service = Invoice_Service.objects.filter(
        invoice_id=id).annotate(
        service_name=F('service__name'),
        price_per_hour=ExpressionWrapper(F('service__unit_price'),
                                         output_field=FloatField()),
        value=ExpressionWrapper(F('service__unit_price') * F('quantity'),
                                output_field=FloatField()),
        tax_value=ExpressionWrapper(
            (F('service__unit_price') * F('quantity'))
            * F('service__tax__value') / 100,
            output_field=FloatField()),
        tax_prct=F('service__tax__value'),
        gross_value=ExpressionWrapper(
            (F('service__unit_price') * F('quantity'))
            + (F('service__unit_price') * F('quantity'))
            * F('service__tax__value') / 100,
            output_field=FloatField()))
    invoicem_material = Invoice_Material.objects.filter(
        invoice_id=id).annotate(
        service_name=F('material__name'),
        price_per_hour=ExpressionWrapper(F('material__price'),
                                         output_field=FloatField()),
        value=ExpressionWrapper(F('material__price') * F('quantity'),
                                output_field=FloatField()),
        tax_value=ExpressionWrapper(
            (F('material__price') * F('quantity'))
            * F('material__tax__value') / 100,
            output_field=FloatField()),
        tax_prct=F('material__tax__value'),
        gross_value=ExpressionWrapper(
            (F('material__price') * F('quantity'))
            + (F('material__price') * F('quantity'))
            * F('material__tax__value') / 100,
            output_field=FloatField()))
    invoice_detail_object = chain(invoicem_material, invoicem_service)
    invoice_service_total = Invoice_Service.objects.filter(
        invoice_id=id).aggregate(
        total_tax=Sum((F('service__unit_price') * F('quantity'))
                      * F('service__tax__value') / 100,
                      output_field=FloatField()),
        total_net=Sum(F('service__unit_price') * F('quantity'),
                      output_field=FloatField()),
        total_gross=Sum((F('service__unit_price') * F('quantity'))
                        * F('service__tax__value') / 100
                        + F('service__unit_price') * F('quantity'),
                        output_field=FloatField()))
    invoice_material_total = Invoice_Material.objects.filter(
        invoice_id=id).aggregate(
        total_tax=Sum((F('material__price') * F('quantity'))
                      * F('material__tax__value') / 100,
                      output_field=FloatField()),
        total_net=Sum(F('material__price') * F('quantity'),
                      output_field=FloatField()),
        total_gross=Sum((F('material__price') * F('quantity'))
                        * F('material__tax__value') / 100
                        + F('material__price') * F('quantity'),
                        output_field=FloatField()))

    # Aggregates over empty querysets return None; default them to 0.
    for totals in (invoice_service_total, invoice_material_total):
        for key in ('total_tax', 'total_net', 'total_gross'):
            if totals[key] is None:
                totals[key] = 0

    invoice_total_tax = (invoice_service_total['total_tax']
                         + invoice_material_total['total_tax'])
    invoice_total_net = (invoice_service_total['total_net']
                         + invoice_material_total['total_net'])
    invoice_total_gross = (invoice_service_total['total_gross']
                           + invoice_material_total['total_gross'])

    if int(invoice_total_gross) > 0:
        invoice_text_value_zl = val2text().translate(
            int(invoice_total_gross)) + ' złotych'
    else:
        invoice_text_value_zl = ''
    if (invoice_total_gross - int(invoice_total_gross)) > 0:
        invoice_text_value_gr = ' i ' + val2text().translate(
            int(round(100 * (invoice_total_gross % 1)))) + ' groszy'
    else:
        invoice_text_value_gr = ''
    invoice_text_value = invoice_text_value_zl + ' ' + invoice_text_value_gr

    return render(
        request, 'invoice_print.html', {
            'invoice_object': invoice_object,
            'invoice_detail_object': invoice_detail_object,
            'invoice_total_tax': invoice_total_tax,
            'invoice_total_net': invoice_total_net,
            'invoice_total_gross': invoice_total_gross,
            'invoice_text_value': invoice_text_value
        })

def create_similar_links(m):
    # if m.similar.first():
    #     return 1
    t = m.movietags_set.filter(type='t').order_by('-n')
    mf = Movie.objects
    for tag in t:
        mf = mf.filter(movietags__tag=tag)
    mf = mf.annotate(comp=ExpressionWrapper(F('imdb_votes') * F('imdb_rating'),
                                            output_field=FloatField()))
    mf = mf.order_by('-comp')[0:20]
    ind = 0
    for r in mf:
        if SimilarMovieRel.objects.filter(linkfrom=m, linkto=r,
                                          basedon='tmain').exists():
            continue
        if r.imdb_id == m.imdb_id:
            continue
        s = SimilarMovieRel(linkfrom=m, linkto=r, basedon='tmain',
                            score=ind, votes=0)
        ind += 1
        s.save()
    return 1

def do_filter_ranking(self, engine_slug, queryset, search_text):
    """Ranks the given queryset according to the relevance of the given search text."""
    return queryset.annotate(
        watson_rank=Value(1.0, output_field=FloatField()))

def process_math(query: QuerySet, entity: Entity) -> QuerySet:
    math_to_aggregate_function = {"sum": Sum, "avg": Avg, "min": Min, "max": Max}
    if entity.math == "dau":
        # In daily active users mode count only up to 1 event per user per day
        query = query.annotate(count=Count("person_id", distinct=True))
    elif entity.math in math_to_aggregate_function:
        # Run relevant aggregate function on specified event property, casting it to a double
        query = query.annotate(
            count=math_to_aggregate_function[entity.math](
                Cast(
                    RawSQL('"posthog_event"."properties"->>%s', (entity.math_property,)),
                    output_field=FloatField(),
                )
            )
        )
        # Skip over events where the specified property is not set or not a number
        # It may not be ideally clear to the user what events were skipped,
        # but in the absence of typing, this is safe, cheap, and frictionless
        query = query.extra(
            where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
            params=[entity.math_property],
        )
    return query

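# Usage sketch for process_math (hedged): the Event queryset and the dict passed
# to Entity below are assumptions for illustration, as is the "revenue" property;
# only the annotate/extra behaviour comes from the function above.
events = process_math(
    Event.objects.all(),
    Entity({"id": "purchase", "type": "events",
            "math": "sum", "math_property": "revenue"}),
)
# -> queryset annotated with `count` = SUM((properties->>'revenue')::double),
#    restricted to rows where the property is a JSON number.
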
class Item(Model, StrMixin):
    item_id = IntegerField(primary_key=True, db_column='typeID')
    group = ForeignKey('ItemGroup', null=True, db_column='groupID', blank=True,
                       related_name='items')
    name = CharField(max_length=100, db_column='typeName', blank=True)
    description = TextField(max_length=3000, blank=True)
    mass = FloatField(blank=True)  # This field type is a guess.
    volume = FloatField(blank=True)  # This field type is a guess.
    capacity = FloatField(blank=True)  # This field type is a guess.
    portion_size = IntegerField(null=True, db_column='portionSize', blank=True)
    race = ForeignKey('Race', null=True, db_column='raceID', blank=True,
                      related_name='racial_items')
    base_price = FloatField(db_column='basePrice', blank=True)  # This field type is a guess.
    is_published = IntegerField(null=True, blank=True, db_column='published')
    marketgroup = ForeignKey('MarketGroup', null=True, db_column='marketGroupID',
                             blank=True, related_name='items')
    chance_of_duplicating = FloatField(
        db_column='chanceOfDuplicating', blank=True)  # This field type is a guess.

    objects = filtered_manager()
    published = filtered_manager(is_published=1)
    unpublished = filtered_manager(is_published=0)

    class Meta:
        app_label = 'eve_sde'
        db_table = 'invTypes'

    @property
    def moon_mining_amount(self):
        # Only applies to some items; shouldn't really be on this model.
        if self.group_id in (334, 427, 428, 429, 536, 712, 873, 913, 964, 967):
            try:
                amount = self.attributes.get(attribute_id=726).value
            except Exception:
                amount = 0
        else:
            amount = 0
        return amount

    @property
    def reprocessed_materials(self):
        return dict(
            (requirement.material, requirement.quantity) for requirement in
            self.required_materials.all().select_related('material'))

    @property
    def recycled_inputs(self):
        return dict(
            (requirement.material, requirement.quantity) for requirement in
            self.blueprint.extra_materials.all().select_related('material')
            if requirement.recycle)

    @property
    def recycled_input_materials(self):
        recycled_materials = defaultdict(float)
        for requirement, count in self.recycled_inputs.items():
            for material, quantity in requirement.reprocessed_materials.items():
                recycled_materials[material] += quantity * count
        return dict(recycled_materials)

    def real_material_requirements(self, me=0, skill=5):
        real_requirements = defaultdict(float)
        recycled_materials = self.recycled_input_materials
        for requirement in self.required_materials.all().select_related('material'):
            material = requirement.material
            real_quantity = requirement.real_quantity(
                me=me, skill=skill) - recycled_materials.get(material, 0)
            real_requirements[material] = real_quantity
        return dict((material, quantity)
                    for material, quantity in real_requirements.items()
                    if quantity > 0.0)

    def real_extra_materials(self):
        real_requirements = defaultdict(float)
        for requirement in self.blueprint.extra_materials.all().select_related('material'):
            material = requirement.material
            real_requirements[material] = (requirement.quantity
                                           * requirement.damage_per_job)
        return dict(real_requirements)

    def build_info(self, me=0, skill=5):
        data = {
            "material_requirements":
                self.real_material_requirements(me=me, skill=skill),
            "extra_materials": self.real_extra_materials(),
        }
        return data

class SQLQuery(models.Model):
    query = TextField()
    start_time = DateTimeField(null=True, blank=True, default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    request = ForeignKey(
        Request,
        related_name='queries',
        null=True,
        blank=True,
        db_index=True,
        on_delete=models.CASCADE,
    )
    traceback = TextField()
    objects = SQLQueryManager()

    # TODO docstring
    @property
    def traceback_ln_only(self):
        return '\n'.join(self.traceback.split('\n')[::2])

    @property
    def formatted_query(self):
        return sqlparse.format(self.query, reindent=True, keyword_case='upper')

    # TODO: Surely a better way to handle this? May return false positives
    @property
    def num_joins(self):
        return self.query.lower().count('join ')

    @property
    def tables_involved(self):
        """
        A really rudimentary way to work out tables involved in a query.
        TODO: Can probably parse the SQL using sqlparse etc. and pull out
        table info that way?
        """
        components = [x.strip() for x in self.query.split()]
        tables = []
        for idx, component in enumerate(components):
            # TODO: If django uses aliases on column names they will be falsely
            # identified as tables...
            if component.lower() in ('from', 'join', 'as'):
                try:
                    _next = components[idx + 1]
                    if not _next.startswith('('):  # Subquery
                        stripped = _next.strip().strip(',')
                        if stripped:
                            tables.append(stripped)
                except IndexError:  # Reached the end
                    pass
        return tables

    @atomic()
    def save(self, *args, **kwargs):
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000
        if not self.pk:
            if self.request:
                self.request.num_sql_queries += 1
                self.request.save(update_fields=['num_sql_queries'])
        super(SQLQuery, self).save(*args, **kwargs)

    @atomic()
    def delete(self, *args, **kwargs):
        self.request.num_sql_queries -= 1
        self.request.save()
        super(SQLQuery, self).delete(*args, **kwargs)

class FormFromModelTest(Model):
    f_int = IntegerField()
    f_float = FloatField()
    f_bool = BooleanField()
    f_int_excluded = IntegerField()

class CellValue(Model):
    class Meta:
        verbose_name = 'Значение ячейки'
        verbose_name_plural = 'Значения ячеек'

    int_value = IntegerField(verbose_name='Значение', null=True, blank=True)
    float_value = FloatField(verbose_name='Значение', null=True, blank=True)
    bool_value = BooleanField(verbose_name='Значение', null=True, blank=True)
    text_value = TextField(verbose_name='Значение', null=True, blank=True)
    char50_value = CharField(verbose_name='Значение', max_length=50,
                             null=True, blank=True)
    char200_value = CharField(verbose_name='Значение', max_length=200,
                              null=True, blank=True)
    date_value = DateField(verbose_name='Значение', null=True, blank=True)
    datetime_value = DateTimeField(verbose_name='Значение', null=True, blank=True)
    ref_value = ForeignKey('Cell', verbose_name='Значение', null=True,
                           blank=True, on_delete=CASCADE)
    formula_value = CharField(verbose_name='Значение', max_length=200,
                              null=True, blank=True)

    def set_value(self, column_type, value):
        if column_type == INTEGER:
            self.int_value = value
        elif column_type == FLOAT:
            self.float_value = value
        elif column_type == BOOLEAN:
            self.bool_value = value
        elif column_type == TEXT:
            self.text_value = value
        elif column_type == CHAR50:
            self.char50_value = value
        elif column_type == CHAR200:
            self.char200_value = value
        elif column_type == DATE:
            self.date_value = value
        elif column_type == DATETIME:
            self.datetime_value = value
        elif column_type == REFERENCE:
            self.ref_value = Cell.objects.filter(pk=value).first()
        elif column_type == FORMULA:
            self.formula_value = value

    def get_value(self, column_type):
        if column_type == INTEGER:
            return self.int_value
        elif column_type == FLOAT:
            return self.float_value
        elif column_type == BOOLEAN:
            return self.bool_value
        elif column_type == TEXT:
            return self.text_value
        elif column_type == CHAR50:
            return self.char50_value
        elif column_type == CHAR200:
            return self.char200_value
        elif column_type == DATE:
            return self.date_value
        elif column_type == DATETIME:
            return self.datetime_value
        elif column_type == REFERENCE:
            return self.ref_value
        elif column_type == FORMULA:
            return self.formula_value

class Town(Model):
    name = CharField(_(u'Name of the town'), max_length=100,
                     blank=False, null=False)
    slug = SlugField(_(u'Slugified name of the town'), max_length=100,
                     blank=False, null=False)
    zipcode = CharField(_(u'Zip code'), max_length=100, blank=True)
    country = CharField(_(u'Country'), max_length=40, blank=True)
    latitude = FloatField(verbose_name=_(u'Latitude'))
    longitude = FloatField(verbose_name=_(u'Longitude'))

    creation_label = _(u'Create a town')

    class Meta:
        app_label = 'geolocation'
        verbose_name = _(u'Town')
        verbose_name_plural = _(u'Towns')
        ordering = ('name',)

    def __str__(self):
        return u'{} {} {}'.format(self.zipcode, self.name, self.country)

    def save(self, *args, **kwargs):
        self.slug = slugify(self.name)
        # super(Town, self).save(*args, **kwargs)
        super().save(*args, **kwargs)

    @classmethod
    def search(cls, address):
        zipcode = address.zipcode
        slug = slugify(address.city) if address.city else None
        query_filter = None
        towns = Town.objects.order_by('zipcode')

        if zipcode:
            query_filter = Q(zipcode=zipcode)
        elif slug:
            query_filter = Q(slug=slug)

        if not query_filter:
            return None

        towns = list(towns.filter(query_filter))

        if len(towns) > 1 and slug:
            # towns = filter(lambda c: c.slug == slug, towns)[:1]
            return next((t for t in towns if t.slug == slug), None)

        return towns[0] if len(towns) == 1 else None

    @classmethod
    def search_all(cls, addresses):
        candidates = list(
            Town.objects.filter(
                Q(zipcode__in=(a.zipcode for a in addresses if a.zipcode))
                | Q(slug__in=(slugify(a.city) for a in addresses if a.city))
            ).order_by('zipcode'))

        cities = {key: list(c)
                  for key, c in groupby(candidates, lambda c: c.slug)}
        zipcodes = {key: list(c)
                    for key, c in groupby(candidates, lambda c: c.zipcode)}

        get_city = cities.get
        get_zipcode = zipcodes.get

        for address in addresses:
            zipcode = address.zipcode
            slug = slugify(address.city) if address.city else None
            towns = []

            if zipcode:
                towns = get_zipcode(zipcode, [])
            elif slug:
                towns = get_city(slug, [])

            # if len(towns) > 1 and slug:
            #     towns = filter(lambda c: c.slug == slug, towns)[:1]
            #
            # yield towns[0] if len(towns) == 1 else None
            if len(towns) > 1 and slug:
                yield next((t for t in towns if t.slug == slug), None)
            else:
                yield towns[0] if len(towns) == 1 else None

class GeoAddress(Model):
    UNDEFINED = 0
    MANUAL = 1
    PARTIAL = 2
    COMPLETE = 3
    STATUS_LABELS = {
        UNDEFINED: _(u'Not localized'),
        MANUAL: _(u'Manual location'),
        PARTIAL: _(u'Partially matching location'),
        COMPLETE: '',
    }

    address = OneToOneField(
        settings.PERSONS_ADDRESS_MODEL,
        verbose_name=_(u'Address'),
        primary_key=True,
        on_delete=CASCADE,
    )
    latitude = FloatField(verbose_name=_(u'Latitude'), null=True,
                          blank=True)  # min_value=-90, max_value=90
    longitude = FloatField(verbose_name=_(u'Longitude'), null=True,
                           blank=True)  # min_value=-180, max_value=180
    draggable = BooleanField(
        verbose_name=_(u'Is this marker draggable in maps ?'), default=True)
    geocoded = BooleanField(verbose_name=_(u'Geocoded from address ?'),
                            default=False)
    status = SmallIntegerField(
        verbose_name=pgettext_lazy('geolocation', u'Status'),
        choices=STATUS_LABELS.items(),
        default=UNDEFINED,
    )

    creation_label = pgettext_lazy('geolocation-address', u'Create an address')

    class Meta:
        app_label = 'geolocation'
        verbose_name = pgettext_lazy('geolocation-address', u'Address')
        verbose_name_plural = pgettext_lazy('geolocation-address', u'Addresses')
        ordering = ('address_id',)

    def __init__(self, *args, **kwargs):
        # super(GeoAddress, self).__init__(*args, **kwargs)
        super().__init__(*args, **kwargs)
        self._neighbours = {}

    @property
    def is_complete(self):
        return self.status == self.COMPLETE

    @classmethod
    def get_geoaddress(cls, address):
        try:
            geoaddress = address.geoaddress
        except GeoAddress.DoesNotExist:
            geoaddress = None
        if geoaddress is None:
            geoaddress = GeoAddress(address=address)
        return geoaddress

    @classmethod
    def populate_geoaddress(cls, address):
        try:
            geoaddress = address.geoaddress
        except GeoAddress.DoesNotExist:
            geoaddress = GeoAddress(address=address)
            geoaddress.set_town_position(Town.search(address))
            geoaddress.save()
        return geoaddress

    @classmethod
    def populate_geoaddresses(cls, addresses):
        for addresses in iter_as_slices(addresses, 50):
            create = []
            update = []

            for address in addresses:
                try:
                    geoaddress = address.geoaddress
                    if geoaddress.latitude is None:
                        update.append(geoaddress)
                except GeoAddress.DoesNotExist:
                    create.append(GeoAddress(address=address))

            towns = Town.search_all(
                [geo.address for geo in chain(create, update)])

            for geoaddress, town in zip(chain(create, update), towns):
                geoaddress.set_town_position(town)

            GeoAddress.objects.bulk_create(create)

            # TODO: only if has changed
            with atomic():
                for geoaddress in update:
                    geoaddress.save(force_update=True)

    def set_town_position(self, town):
        if town is not None:
            self.latitude = town.latitude
            self.longitude = town.longitude
            self.status = GeoAddress.PARTIAL
        else:
            self.latitude = None
            self.longitude = None
            self.status = GeoAddress.UNDEFINED

    def update(self, **kwargs):
        update_model_instance(self, **kwargs)

    def neighbours(self, distance):
        neighbours = self._neighbours.get(distance)
        if neighbours is None:
            self._neighbours[distance] = neighbours = self._get_neighbours(distance)
        return neighbours

    def _get_neighbours(self, distance):
        latitude = self.latitude
        longitude = self.longitude
        if latitude is None and longitude is None:
            return GeoAddress.objects.none()

        upper_left, lower_right = location_bounding_box(
            latitude, longitude, distance)

        return GeoAddress.objects.exclude(address_id=self.address.pk)\
                                 .exclude(address__object_id=self.address.object_id)\
                                 .filter(latitude__range=(upper_left[0], lower_right[0]),
                                         longitude__range=(upper_left[1], lower_right[1]))

    def __str__(self):
        return u'GeoAddress(lat={}, lon={}, status={})'.format(
            self.latitude, self.longitude, self.status)

def leaderboard(request, challenge_phase_split_id):
    """Returns leaderboard for a corresponding Challenge Phase Split"""

    # Check if the challenge phase split exists or not
    try:
        challenge_phase_split = ChallengePhaseSplit.objects.get(
            pk=challenge_phase_split_id)
    except ChallengePhaseSplit.DoesNotExist:
        response_data = {'error': 'Challenge Phase Split does not exist'}
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Check if the Challenge Phase Split is publicly visible or not
    if challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC:
        response_data = {
            'error': 'Sorry, leaderboard is not public yet for this Challenge Phase Split!'
        }
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard
    try:
        default_order_by = leaderboard.schema['default_order_by']
    except KeyError:
        response_data = {
            'error': 'Sorry, default filtering key not found in leaderboard schema!'
        }
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_obj = challenge_phase_split.challenge_phase.challenge
    challenge_hosts_emails = challenge_obj.creator.get_all_challenge_host_email()
    leaderboard_data = LeaderboardData.objects.exclude(
        submission__created_by__email__in=challenge_hosts_emails)

    # Get all the successful submissions related to the challenge phase split
    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_public=True,
        submission__is_flagged=False,
        submission__status=Submission.FINISHED).order_by('created_at')

    leaderboard_data = leaderboard_data.annotate(
        filtering_score=RawSQL('result->>%s', (default_order_by,),
                               output_field=FloatField())).values(
        'id',
        'submission__participant_team__team_name',
        'challenge_phase_split',
        'result',
        'filtering_score',
        'leaderboard__schema',
        'submission__submitted_at')

    sorted_leaderboard_data = sorted(
        leaderboard_data,
        key=lambda k: float(k['filtering_score']),
        reverse=True)

    # Keep only the best entry per participant team
    distinct_sorted_leaderboard_data = []
    team_list = []
    for data in sorted_leaderboard_data:
        if data['submission__participant_team__team_name'] in team_list:
            continue
        distinct_sorted_leaderboard_data.append(data)
        team_list.append(data['submission__participant_team__team_name'])

    leaderboard_labels = challenge_phase_split.leaderboard.schema['labels']
    for item in distinct_sorted_leaderboard_data:
        item['result'] = [item['result'][index] for index in leaderboard_labels]

    paginator, result_page = paginated_queryset(
        distinct_sorted_leaderboard_data,
        request,
        pagination_class=StandardResultSetPagination())
    response_data = result_page
    return paginator.get_paginated_response(response_data)

class Median(Aggregate):
    function = 'PERCENTILE_CONT'
    name = 'median'
    output_field = FloatField()
    template = '%(function)s(0.5) WITHIN GROUP (ORDER BY %(expressions)s)'

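# Usage sketch for the Median aggregate above. PERCENTILE_CONT is supported by
# PostgreSQL and Oracle among others; the Order model and its `total` field are
# hypothetical, used only for illustration:
median_total = Order.objects.aggregate(median_total=Median('total'))['median_total']
# -> SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY "orders"."total") ...
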
def my_basket(request):
    __user = request.user
    __strFilter = request.GET.get('search', '')
    __strsortby = request.GET.get('sortby', '')
    __strdir = request.GET.get('dir', '')

    if request.method == 'POST':
        if 'btnLikes' in request.POST:
            __strID = request.POST.get("id", '0')
            __product = Products.objects.get(pk=int(__strID))
            pl = ProductsLikes(user=__user, products=__product)
            pl.save()
        elif 'btnDelete' in request.POST:
            __strID = request.POST.get("id", '0')
            tmpc = tempCar.objects.get(pk=int(__strID))
            tmpc.delete()
        elif 'btnProcessCar' in request.POST:
            tcs = tempCar.objects.filter(user=__user)
            if tcs.exists():
                doc = Documents(user=__user, description="online sell",
                                type='out', subType="sell")
                doc.save()
                for tc in tcs:
                    dd = DocumentsDetails()
                    dd.type = "out"
                    dd.price = tc.products.price
                    dd.quantity = tc.quantity
                    dd.documents = doc
                    dd.user = request.user
                    dd.products = tc.products
                    dd.save()
                tmpc = tempCar.objects.filter(user=__user)
                for t in tmpc:
                    tt = tempCar.objects.get(pk=t.pk)
                    tt.delete()
                messages.success(request, 'Thanks for your purchase :)')
            else:
                messages.info(request, 'Add products to your basket first')

    car_list = tempCar.objects.annotate(
        likes=Count('products__productslikes'),
        cost=Cast(F('quantity') * F('products__price'),
                  FloatField())).filter(user=__user,
                                        products__name__contains=__strFilter)

    if __strsortby == 'name':
        __strsortby = 'products__name'
        if __strdir == "desc":
            __strsortby = "-" + __strsortby
        car_list = tempCar.objects.annotate(
            likes=Count('products__productslikes'),
            cost=Cast(F('quantity') * F('products__price'),
                      FloatField())).filter(
            user=__user,
            products__name__contains=__strFilter).order_by(__strsortby)

    if __strsortby == 'likes':
        if __strdir == "desc":
            __strsortby = "-" + __strsortby
        car_list = tempCar.objects.annotate(
            likes=Count('products__productslikes'),
            cost=Cast(F('quantity') * F('products__price'),
                      FloatField())).filter(
            user=__user,
            products__name__contains=__strFilter).order_by(__strsortby)

    page = request.GET.get('page', 1)
    total = car_list.aggregate(Sum('cost'))['cost__sum']
    if total is None:
        total = 0.0

    paginator = Paginator(car_list, 10)
    try:
        products = paginator.page(page)
    except PageNotAnInteger:
        products = paginator.page(1)
    except EmptyPage:
        products = paginator.page(paginator.num_pages)

    return render(request, 'store/mybasket.html', {
        'products': products,
        'total': total
    })

class KeyTransform(Transform):

    SPEC_MAP = {
        date: 'DATE',
        datetime: 'DATETIME',
        float: 'DOUBLE',
        int: 'INTEGER',
        six.text_type: 'CHAR',
        time: 'TIME',
        dict: 'BINARY',
    }
    if six.PY2:
        from __builtin__ import long  # make source lintable on Python 3
        SPEC_MAP[long] = 'INTEGER'

    SPEC_MAP_NAMES = ', '.join(
        sorted(x.__name__ for x in six.iterkeys(SPEC_MAP)))

    TYPE_MAP = {
        'BINARY': DynamicField,
        'CHAR': TextField(),
        'DATE': DateField(),
        'DATETIME': DateTimeField(),
        'DOUBLE': FloatField(),
        'INTEGER': IntegerField(),
        'TIME': TimeField(),
    }

    def __init__(self, key_name, data_type, *args, **kwargs):
        subspec = kwargs.pop('subspec', None)
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name
        self.data_type = data_type

        try:
            output_field = self.TYPE_MAP[data_type]
        except KeyError:  # pragma: no cover
            raise ValueError("Invalid data_type '{}'".format(data_type))

        if data_type == 'BINARY':
            self.output_field = output_field(spec=subspec)
        else:
            self.output_field = output_field

    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return (
            "COLUMN_GET({}, %s AS {})".format(lhs, self.data_type),
            params + [self.key_name],
        )

    if django.VERSION[:3] <= (1, 8, 2):  # pragma: no cover
        # Backport of bugfix for transforms with arguments, taken from:
        # https://code.djangoproject.com/ticket/24744
        def copy(self):
            return copy(self)

        def relabeled_clone(self, relabels):
            copy = self.copy()
            copy.lhs = self.lhs.relabeled_clone(relabels)
            return copy

def get(self, request, format=None):
    uid = request.user.id
    type = request.GET.get('type', None)
    if type is None:
        return Response({'error': "Parameter errors"},
                        status=status.HTTP_400_BAD_REQUEST)
    print(uid)
    if type == 'recite':
        try:
            todayLeftPlans = Plan.objects.select_related('word').filter(
                user=uid, date=datetime.date.today(), isChecked=False,
                times__lt=maxTimes)[:batchNum]
            print(todayLeftPlans.values_list())
            if todayLeftPlans.count() > 0:
                words = [p.word for p in todayLeftPlans]
            else:
                sub = Subscription.objects.get(user=uid)
                targetNum = sub.targetNumber
                wordbook = sub.wordbook
                todayDoneWords = Plan.objects.filter(
                    Q(user=uid), Q(date=datetime.date.today()),
                    Q(isChecked=True) | Q(times=maxTimes)).values_list('word')
                oldWords = Recitation.objects.annotate(priority=ExpressionWrapper(
                    F('successTimeCount') / (F('recitedTimeCount') + 0.1),
                    output_field=FloatField()
                )).filter(user=uid, word__wordbook=wordbook)
                oldWords1 = oldWords.filter(
                    successTimeCount__lt=F('recitedTimeCount') * 0.7)
                oldWords2 = oldWords.filter(
                    successTimeCount__gte=F('recitedTimeCount') * 0.7,
                    lastRecitedTime__lt=datetime.date.today() - F('duration'))
                print(oldWords2.values('word_id', 'word__content'))
                # TODO: tune the priority ordering
                oldWords = oldWords1.union(oldWords2).exclude(
                    word__in=set(todayDoneWords)).order_by(
                    '-priority')[:int(targetNum * 0.8)]
                print(oldWords.values())
                for oldWord in oldWords:
                    Plan.objects.create(user=request.user, word=oldWord.word)
                oldWordNum = oldWords.count()
                if oldWordNum < targetNum:
                    wordBook = Subscription.objects.get(user=uid).wordbook
                    recitedWord = list(Recitation.objects.filter(user=uid))
                    recitedWords = []
                    for rw in recitedWord:
                        recitedWords.append(rw.word_id)
                    newWord = Word.objects.filter(wordbook=wordBook).exclude(
                        id__in=recitedWords)[:targetNum - oldWordNum]
                    for w in newWord:
                        Recitation.objects.create(user=request.user, word=w)
                        Plan.objects.create(user=request.user, word=w)
                todayLeftPlans = Plan.objects.filter(
                    user=uid, date=datetime.date.today(), isChecked=False,
                    times__lt=maxTimes)[:batchNum]
                words = [p.word for p in todayLeftPlans]
        except Exception as e:
            print(e)
            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)
    elif type == 'review':
        try:
            sub = Subscription.objects.get(user=uid)
            targetNum = sub.targetNumber
            wordbook = sub.wordbook
            todayLeftPlans = Plan.objects.select_related('word').filter(
                user=uid, date=datetime.date.today(), isChecked=False,
                times__lt=maxTimes)[:batchNum]
            print(todayLeftPlans.values_list())
            if todayLeftPlans.count() > 0:
                words = [p.word for p in todayLeftPlans]
            else:
                oldWords = Recitation.objects.annotate(priority=ExpressionWrapper(
                    F('successTimeCount') / (F('recitedTimeCount') + 0.1),
                    output_field=FloatField()
                )).filter(user=uid,
                          word__wordbook=wordbook).order_by('-priority')[:targetNum]
                for oldWord in oldWords:
                    Plan.objects.create(user=request.user, word=oldWord.word)
                todayLeftPlans = Plan.objects.filter(
                    user=uid, date=datetime.date.today(), isChecked=False,
                    times__lt=maxTimes)[:batchNum]
                words = [p.word for p in todayLeftPlans]
        except Exception as e:
            print(e)
            return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)

    wordDetails = []
    try:
        for w in words:
            wordDetail = {}
            wordDetail['id'] = w.id
            wordDetail['content'] = w.content
            wordDetail['phonetic'] = w.phonetic
            wordDetail['definition'] = w.definition.split('\n')
            wordDetail['translation'] = w.translation.split('\n')
            wordDetails.append(wordDetail)
    except Exception as e:
        print(e)
        return Response({'error': str(e)}, status=status.HTTP_404_NOT_FOUND)
    return Response(wordDetails, status=status.HTTP_200_OK)

def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    if not (self.object.ended or self.can_edit):
        raise Http404()

    queryset = Submission.objects.filter(contest_object=self.object)

    ac_count = Count(
        Case(When(result='AC', then=Value(1)), output_field=IntegerField()))
    ac_rate = CombinedExpression(ac_count / Count('problem'), '*',
                                 Value(100.0), output_field=FloatField())

    status_count_queryset = list(
        queryset.values('problem__code', 'result')
                .annotate(count=Count('result'))
                .values_list('problem__code', 'result', 'count'),
    )
    labels, codes = [], []
    contest_problems = self.object.contest_problems.order_by(
        'order').values_list('problem__name', 'problem__code')
    if contest_problems:
        labels, codes = zip(*contest_problems)
    num_problems = len(labels)
    status_counts = [[] for i in range(num_problems)]
    for problem_code, result, count in status_count_queryset:
        if problem_code in codes:
            status_counts[codes.index(problem_code)].append((result, count))

    result_data = defaultdict(partial(list, [0] * num_problems))
    for i in range(num_problems):
        for category in _get_result_data(
                defaultdict(int, status_counts[i]))['categories']:
            result_data[category['code']][i] = category['count']

    stats = {
        'problem_status_count': {
            'labels': labels,
            'datasets': [{
                'label': name,
                'backgroundColor': settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS[name],
                'data': data,
            } for name, data in result_data.items()],
        },
        'problem_ac_rate': get_bar_chart(
            queryset.values('contest__problem__order', 'problem__name')
                    .annotate(ac_rate=ac_rate)
                    .order_by('contest__problem__order')
                    .values_list('problem__name', 'ac_rate'),
        ),
        'language_count': get_pie_chart(
            queryset.values('language__name')
                    .annotate(count=Count('language__name'))
                    .filter(count__gt=0)
                    .order_by('-count')
                    .values_list('language__name', 'count'),
        ),
        'language_ac_rate': get_bar_chart(
            queryset.values('language__name')
                    .annotate(ac_rate=ac_rate)
                    .filter(ac_rate__gt=0)
                    .values_list('language__name', 'ac_rate'),
        ),
    }

    context['stats'] = mark_safe(json.dumps(stats))

    return context

class Cardinality(Aggregate):
    function = 'hll_cardinality'
    output_field = FloatField()

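# Usage sketch for Cardinality (hedged): hll_cardinality() comes from the
# PostgreSQL `hll` extension and reads an hll-typed column, so the model and
# `visitors` field below are hypothetical. Merging several hll rows first would
# typically go through hll_union_agg rather than this function alone.
estimate = DailyVisitors.objects.filter(day=today).aggregate(
    uniques=Cardinality('visitors'))['uniques']  # float estimate of distinct count
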
def get(self, request, *args, **kwargs):
    count = {
        'order_nums': 0,
        'cart_nums': 0,
        'cart_money': 0,
    }
    user_id = request.session.get('openid', None)
    order = request.GET.get('order', None)
    cart = request.GET.get('cart', None)
    is_member = request.session.get('is_member', None)

    if order and int(order) == 1:
        count['order_nums'] = Order.objects.filter(user_id=user_id,
                                                   status=0).count()
    if cart and int(cart) == 1:
        goods_nums = ShopCart.objects.filter(user_id=user_id).aggregate(
            goods_sum=Sum('quantity'),
            price_totals=Sum(F('goods__price') * F('quantity'),
                             output_field=FloatField()),
            benefits_totals=Sum(F('goods__benefits') * F('quantity'),
                                output_field=FloatField())
        )
        if goods_nums.get('goods_sum'):
            count['cart_nums'] = goods_nums.get('goods_sum')
            count['cart_money'] = goods_nums.get(
                'benefits_totals') if is_member == 1 else goods_nums.get(
                'price_totals')
    return Response(count)

def dashboard_compare_tests_list(tests_list):
    '''Return comparison data for dashboard'''
    tests = []
    for t in tests_list:
        test_id = t['id']
        project_id = t['project_id']

        project_tests = Test.objects.filter(
            project_id=project_id, id__lte=test_id).order_by('-start_time')
        if project_tests.count() > 1:
            prev_test_id = project_tests[1].id
        else:
            prev_test_id = test_id

        test_data = TestActionAggregateData.objects.filter(test_id=test_id). \
            annotate(errors=RawSQL("((data->>%s)::numeric)", ('errors',))). \
            annotate(count=RawSQL("((data->>%s)::numeric)", ('count',))). \
            annotate(weight=RawSQL("((data->>%s)::numeric)", ('weight',))). \
            aggregate(
                count_sum=Sum(F('count'), output_field=FloatField()),
                errors_sum=Sum(F('errors'), output_field=FloatField()),
                overall_avg=Sum(F('weight')) / Sum(F('count'))
            )
        prev_test_data = TestActionAggregateData.objects.filter(test_id=prev_test_id). \
            annotate(errors=RawSQL("((data->>%s)::numeric)", ('errors',))). \
            annotate(count=RawSQL("((data->>%s)::numeric)", ('count',))). \
            annotate(weight=RawSQL("((data->>%s)::numeric)", ('weight',))). \
            aggregate(
                count_sum=Sum(F('count'), output_field=FloatField()),
                errors_sum=Sum(F('errors'), output_field=FloatField()),
                overall_avg=Sum(F('weight')) / Sum(F('count'))
            )
        try:
            errors_percentage = test_data['errors_sum'] * 100 / test_data['count_sum']
        except (TypeError, ZeroDivisionError) as e:
            logger.error(e)
            errors_percentage = 0
        success_requests = 100 - errors_percentage

        # TODO: improve this part
        if success_requests >= 98:
            result = 'success'
        elif 95 <= success_requests < 98:
            result = 'warning'
        else:
            result = 'danger'

        tests.append({
            'project_name': t['project__project_name'],
            'display_name': t['display_name'],
            'parameters': t['parameters'],
            'start_time': t['start_time'],
            'success_requests': success_requests,
            'test_avg_response_times': test_data['overall_avg'],
            'prev_test_avg_response_times': prev_test_data['overall_avg'],
            'result': result,
        })
    return tests

class Request(models.Model):
    id = CharField(max_length=36, default=uuid4, primary_key=True)
    path = CharField(max_length=190, db_index=True)
    query_params = TextField(blank=True, default='')
    raw_body = TextField(blank=True, default='')
    body = TextField(blank=True, default='')
    method = CharField(max_length=10)
    start_time = DateTimeField(default=timezone.now, db_index=True)
    view_name = CharField(max_length=190, db_index=True, blank=True,
                          default='', null=True)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    encoded_headers = TextField(blank=True, default='')  # stores json
    meta_time = FloatField(null=True, blank=True)
    meta_num_queries = IntegerField(null=True, blank=True)
    meta_time_spent_queries = FloatField(null=True, blank=True)
    pyprofile = TextField(blank=True, default='')
    prof_file = FileField(max_length=300, blank=True, storage=silk_storage)

    @property
    def total_meta_time(self):
        return (self.meta_time or 0) + (self.meta_time_spent_queries or 0)

    @property
    def profile_table(self):
        for n, columns in enumerate(parse_profile(self.pyprofile)):
            location = columns[-1]
            if n and '{' not in location and '<' not in location:
                r = re.compile(r'(?P<src>.*\.py)\:(?P<num>[0-9]+).*')
                m = r.search(location)
                group = m.groupdict()
                src = group['src']
                num = group['num']
                name = 'c%d' % n
                fmt = '<a name={name} href="?pos={n}&file_path={src}&line_num={num}#{name}">{location}</a>'
                rep = fmt.format(**dict(group, **locals()))
                yield columns[:-1] + [mark_safe(rep)]
            else:
                yield columns

    # Updated in the atomic transaction within SQLQuery save()/delete() as well
    # as in bulk_create of SQLQueryManager
    # TODO: This is probably a bad way to do this, .count() will prob do?
    num_sql_queries = IntegerField(default=0)  # TODO replace with count()

    @property
    def time_spent_on_sql_queries(self):
        """
        TODO: Perhaps there is a nicer way to do this with Django aggregates?
        My initial thought was to perform:

            SQLQuery.objects.filter.aggregate(Sum(F('end_time')) - Sum(F('start_time')))

        However this feature isn't available yet; there has been talk of
        allowing F objects within aggregates for four years here:
        https://code.djangoproject.com/ticket/14030. It looks like this will
        go in soon, at which point this should be changed.
        """
        return sum(x.time_taken for x in SQLQuery.objects.filter(request=self))

    @property
    def headers(self):
        if self.encoded_headers:
            raw = json.loads(self.encoded_headers)
        else:
            raw = {}
        return CaseInsensitiveDictionary(raw)

    @property
    def content_type(self):
        return self.headers.get('content-type', None)

    @classmethod
    def garbage_collect(cls, force=False):
        """
        Remove Request/Responses when we are at the SILKY_MAX_RECORDED_REQUESTS limit.
        Note that multiple in-flight requests may call this at once, causing a
        double collection.
        """
        check_percent = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS_CHECK_PERCENT
        check_percent /= 100.0
        if check_percent < random.random() and not force:
            return
        target_count = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS

        # Since garbage collection is probabilistic, the target count should
        # be lowered to account for requests before the next garbage collection
        if check_percent != 0:
            target_count -= int(1 / check_percent)

        prune_count = max(cls.objects.count() - target_count, 0)
        prune_rows = cls.objects.order_by('start_time') \
            .values_list('id', flat=True)[:prune_count]
        cls.objects.filter(id__in=list(prune_rows)).delete()

    def save(self, *args, **kwargs):
        # sometimes django requests return the body as 'None'
        if self.raw_body is None:
            self.raw_body = ''

        if self.body is None:
            self.body = ''

        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000

        super(Request, self).save(*args, **kwargs)
        Request.garbage_collect(force=False)

def get_best_rated(self):
    return self.values('movie').annotate(
        rate=Sum('rate') / Count('movie', output_field=FloatField())
    ).order_by('-rate')

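# Usage sketch (hedged): get_best_rated reads like a custom QuerySet/Manager
# method on a Rating-style model with `movie` and `rate` fields; the model name
# below is assumed from the lookups above. Dividing Sum by a float-typed Count
# yields a float average per movie:
top_movies = Rating.objects.get_best_rated()[:10]
# -> [{'movie': 42, 'rate': 4.6}, ...], best-rated first.
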
def get_aggregate_total(query: QuerySet, entity: Entity) -> int:
    entity_total = 0
    if entity.math == "dau":
        _query, _params = query.query.sql_with_params()
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT count(DISTINCT person_id) FROM ({}) as aggregates".format(_query),
                _params,
            )
            entity_total = cursor.fetchall()[0][0]
    elif entity.math in MATH_TO_AGGREGATE_FUNCTION:
        query = query.annotate(
            math_prop=Cast(
                RawSQL('"posthog_event"."properties"->>%s', (entity.math_property,)),
                output_field=FloatField(),
            )
        )
        query = query.extra(
            where=['jsonb_typeof("posthog_event"."properties"->%s) = \'number\''],
            params=[entity.math_property],
        )
        _query, _params = query.query.sql_with_params()
        with connection.cursor() as cursor:
            agg_func = MATH_TO_AGGREGATE_STRING[entity.math].format(math_prop="math_prop")
            cursor.execute(
                "SELECT {} FROM ({}) as aggregates".format(agg_func, _query),
                _params,
            )
            entity_total = cursor.fetchall()[0][0]
    else:
        entity_total = len(query)
    return entity_total

def get_queryset(self, pk=None):
    """All videos except for null ones."""
    queryset = Video.objects.filter(is_unlisted=False).values()
    request = self.request

    fields = [x.name for x in Video._meta.fields]
    for f in VIDEO_FIELDS:
        fields.remove(f)

    def get_score_annotation(user_preferences_vector):
        """Returns an sql object annotating queries with the video ratings (scalar product)."""
        return sum([F(f) * v
                    for f, v in zip(VIDEO_FIELDS, user_preferences_vector)])

    features = self.get_features_from_request()
    default_features = [constants['DEFAULT_PREFS_VAL'] for _ in VIDEO_FIELDS]

    search_username = self.need_scores_for_username()

    # computing score inside the database
    if search_username:
        queryset = queryset.values(*fields)
        queryset = queryset.annotate(
            **{key: F(f'videorating__{key}') for key in VIDEO_FIELDS},
            user=F('videorating__user__user__username')).filter(
            user=search_username)

        # for myself, allow showing public/non-public videos
        if search_username == request.user.username:
            is_public = request.query_params.get('show_all_my_videos',
                                                 'true') == 'false'
            print(is_public)
        else:
            # for other people, only show public videos
            is_public = True

        # keeping only public videos
        if is_public:
            queryset = VideoRatingPrivacy._annotate_privacy(
                queryset, prefix='videoratingprivacy', field_user=None,
                filter_add={
                    'videoratingprivacy__user__user__username': search_username
                })
            queryset = queryset.filter(_is_public=True)

        queryset = queryset.annotate(rating_n_experts=Value(1, IntegerField()))

        q1 = Q(expertrating_video_1__user__user__username=search_username)
        q2 = Q(expertrating_video_2__user__user__username=search_username)
        c1 = Count('expertrating_video_1', q1, distinct=True)
        c2 = Count('expertrating_video_2', q2, distinct=True)
        queryset = queryset.annotate(rating_n_ratings=c1 + c2)

        # logging model usage in search
        if self.request.user.is_authenticated:
            RepresentativeModelUsage.objects.get_or_create(
                viewer=UserPreferences.objects.get(
                    user__username=self.request.user.username),
                model=UserPreferences.objects.get(
                    user__username=search_username))

    queryset = queryset.annotate(
        score_preferences_term=get_score_annotation(features))
    queryset = queryset.annotate(
        tournesol_score=get_score_annotation(default_features))
    queryset = queryset.annotate(score_search_term_=Value(0.0, FloatField()))

    if request.query_params.get('search'):
        # computing the postgres score for search
        if connection.vendor.startswith('postgres'):
            s_query = request.query_params.get('search', '')

            def word_to_query(w):
                """Convert one word into a query."""
                queries = []
                queries.append(SearchQuery(w, search_type='raw'))
                queries.append(SearchQuery(w + ':*', search_type='raw'))
                return reduce(lambda x, y: x | y, queries)

            def words_to_query(s_query, max_len=100, max_word_len=20):
                """Convert a string with words into a SearchQuery."""
                s_query = s_query[:max_len]
                s_query = s_query.split(' ')
                s_query = [''.join(filter(str.isalnum, x)) for x in s_query]
                s_query = [x for x in s_query if 1 <= len(x) <= max_word_len]
                s_query = [word_to_query(x) for x in s_query]

                if not s_query:
                    return SearchQuery('')

                return reduce(lambda x, y: x & y, s_query)

            s_query = words_to_query(s_query)

            s_vectors = [SearchVector(f, weight=w)
                         for f, w in zip(self.search_fields,
                                         self.search_weights)]
            s_vector = reduce(lambda x, y: x + y, s_vectors)

            queryset = queryset.annotate(
                score_search_term_=SearchRank(s_vector, s_query))
        else:
            # in other databases, using basic filtering
            queryset = filters_.SearchFilter().filter_queryset(
                self.request, queryset, self)
            queryset = queryset.annotate(
                score_search_term_=Value(1.0, FloatField()))

    queryset = queryset.annotate(
        score_search_term=F('score_search_term_')
        * VideoSearchEngine.VIDEO_SEARCH_COEFF)

    queryset = queryset.annotate(score=F('score_preferences_term')
                                 + F('score_search_term'))

    return queryset

def session_stats(session):
    data_extras = session.datasets.values(
        key=F('kind__name')).order_by('key').annotate(
        count=Count('id'),
        time=Sum(F('exposure_time') * F('num_frames'), output_field=FloatField()),
        frames=Sum('num_frames'),
    )
    data_stats = [
        ['Avg Frames/{}'.format(info['key']),
         round(info['frames'] / info['count'], 1)]
        for info in data_extras
    ]
    data_counts = [[info['key'], round(info['count'], 1)] for info in data_extras]
    data_info = session.datasets.values('exposure_time', 'attenuation',
                                        'energy', 'num_frames')
    report_info = AnalysisReport.objects.filter(
        data__session=session).values('score')
    param_histograms = make_parameter_histogram(data_info, report_info)

    shutters = sum([info['time'] for info in data_extras]) / HOUR_SECONDS
    total_time = session.total_time()
    last_data = session.datasets.last()

    timeline_data = [{
        "type": data['kind__name'],
        "start": js_epoch(data['start_time']),
        "end": js_epoch(data['end_time']),
        "label": "{}: {}".format(data["kind__name"], data['name'])
    } for data in session.datasets.values('start_time', 'end_time',
                                          'kind__name', 'name')]

    stats = {
        'details': [{
            'title': 'Session Parameters',
            'description': 'Data Collection Summary',
            'style': "row",
            'content': [{
                'title': '',
                'kind': 'table',
                'data': [
                    ['Total Time', humanize_duration(total_time)],
                    ['First Login',
                     timezone.localtime(session.start()).strftime('%c')],
                    ['Samples', session.samples().count()],
                ] + data_counts,
                'header': 'column',
                'style': 'col-12 col-md-6',
            }, {
                'title': '',
                'kind': 'table',
                'data': [
                    ['Shutters Open',
                     "{} ({:.2f}%)".format(
                         humanize_duration(shutters),
                         shutters * 100 / total_time if total_time else 0)],
                    ['Last Dataset',
                     '' if not last_data else last_data.modified.strftime('%c')],
                    ['No. of Logins', session.stretches.count()],
                ] + data_stats,
                'header': 'column',
                'style': 'col-12 col-md-6',
            }, {
                'title': 'Types of data collected',
                'kind': 'columnchart',
                'data': {
                    'x-label': 'Data Type',
                    'data': [{
                        'Data Type': row['key'],
                        'Total': row['count'],
                    } for row in data_extras]
                },
                'style': 'col-12 col-md-6'
            }] + [{
                'title': PARAMETER_NAMES[param].title(),
                'kind': 'histogram',
                'data': {
                    'data': [{"x": row[0], "y": row[1]}
                             for row in param_histograms[param]],
                },
                'style': 'col-12 col-md-6'
            } for param in ('score', 'energy', 'exposure_time',
                            'attenuation', 'num_frames')]
        }, {
            'title': 'Session Timeline',
            'description': ('Timeline of data collection for various types of '
                            'datasets during the whole session from {} to {}').format(
                session.start().strftime('%c'), session.end().strftime('%c')),
            'style': "row",
            'content': [
                {
                    'title': 'Session Timeline',
                    'kind': 'timeline',
                    'start': js_epoch(session.start()),
                    'end': js_epoch(session.end()),
                    'data': timeline_data,
                    'style': 'col-12'
                },
                {
                    'title': 'Inactivity Gaps',
                    'kind': 'table',
                    'data': [['', 'Start', 'End', 'Duration']] + [
                        [i + 1, gap[0].strftime('%c'), gap[1].strftime('%c'),
                         natural_duration(gap[2])]
                        for i, gap in enumerate(session.gaps())
                    ],
                    'header': 'row',
                    'notes': "Periods of possible inactivity while the session "
                             "was open, greater than 10 minutes",
                    'style': 'col-12',
                },
            ]
        }]
    }
    return stats

class Azimuth(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)

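# Usage sketch (hedged): Azimuth renders ST_Azimuth(geom1, geom2) on PostGIS and
# returns the bearing between two points in radians; string arguments resolve as
# field references. The Leg model and its two PointFields are hypothetical:
legs = Leg.objects.annotate(bearing=Azimuth('origin', 'destination'))
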
def get_context_data(self, **kwargs):
    context = super(OrderDetailsView, self).get_context_data(**kwargs)
    order = Order.objects.get(pk=self.kwargs['pk'])
    try:
        cuo = CoffeeUserOrder.objects.filter(order=order)
        coffees = CoffeeUserOrderQuantity.objects.filter(
            cuo__in=cuo).values(
            'coffee_id', 'coffee__name', 'coffee__price').annotate(
            quantity=Sum('quantity')).annotate(
            total=ExpressionWrapper(F('coffee__price') * F('quantity'),
                                    output_field=FloatField()))
        context['coffees'] = coffees
        total = 0
        for coffee in coffees:
            total += coffee['total']
        total += order.shipping
        context['shipping'] = order.shipping
        context['total'] = total
    except Exception:
        return context
    return context

class GeometryDistance(GeoFunc):
    output_field = FloatField()
    arity = 2
    function = ''
    arg_joiner = ' <-> '
    geom_param_pos = (0, 1)

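# Usage sketch: GeometryDistance renders the PostGIS `<->` distance operator
# (empty function name, ' <-> ' as the argument joiner), which lets an ORDER BY
# use a spatial index for nearest-neighbour queries. The City model and its
# `location` PointField are hypothetical:
from django.contrib.gis.geos import Point

ref = Point(2.35, 48.85, srid=4326)
nearest = City.objects.order_by(GeometryDistance('location', ref))[:5]
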
def calcular_total(self):
    total = self.itemdopedido_set.all().aggregate(
        tot_pedido=Sum((F('quantidade') * F('produto__preco')) - F('desconto'),
                       output_field=FloatField())
    )['tot_pedido'] or 0
    total = total - float(self.impostos) - float(self.desconto)
    self.valor = total
    Venda.objects.filter(id=self.id).update(valor=total)

class LineLocatePoint(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)

def get_prep_value(self, value):
    if isinstance(value, Now):
        return value
    return FloatField.get_prep_value(self, value)

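# Context sketch (hedged): get_prep_value above reads like a method on a custom
# FloatField subclass (e.g. a Unix-timestamp field) where `Now` is a sentinel
# that must pass through to the backend unconverted rather than being coerced
# to float. The enclosing class and model below are assumptions:
#
#     class UnixTimestampField(FloatField):
#         def get_prep_value(self, value):
#             if isinstance(value, Now):
#                 return value  # resolved later as the database's current time
#             return FloatField.get_prep_value(self, value)
#
#     Log.objects.create(created_at=Now())          # DB fills in the time
#     Log.objects.create(created_at=1700000000.0)   # stored as a plain float
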
def calculate_distinct_sorted_leaderboard_data(user, challenge_obj,
                                               challenge_phase_split,
                                               only_public_entries):
    """
    Function to calculate and return the sorted leaderboard data

    Arguments:
        user {[Class object]} -- User model object
        challenge_obj {[Class object]} -- Challenge model object
        challenge_phase_split {[Class object]} -- Challenge phase split model object
        only_public_entries {[Boolean]} -- Boolean value to determine if the
            user wants to include private entries or not

    Returns:
        [list] -- Ranked list of participant teams to be shown on leaderboard
        [status] -- HTTP status code (200/400)
    """
    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard
    try:
        default_order_by = leaderboard.schema["default_order_by"]
    except KeyError:
        response_data = {
            "error": "Sorry, default_order_by key is missing in leaderboard schema!"
        }
        return response_data, status.HTTP_400_BAD_REQUEST

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_hosts_emails = challenge_obj.creator.get_all_challenge_host_email()
    is_challenge_phase_public = challenge_phase_split.challenge_phase.is_public
    # Exclude the submissions from challenge host team to be displayed on the
    # leaderboard of public phases
    challenge_hosts_emails = ([] if not is_challenge_phase_public
                              else challenge_hosts_emails)

    challenge_host_user = is_user_a_host_of_challenge(user, challenge_obj.pk)

    all_banned_email_ids = challenge_obj.banned_email_ids

    # Check if challenge phase leaderboard is public for participant user or not
    if (challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC
            and not challenge_host_user):
        response_data = {"error": "Sorry, the leaderboard is not public!"}
        return response_data, status.HTTP_400_BAD_REQUEST

    leaderboard_data = LeaderboardData.objects.exclude(
        Q(submission__created_by__email__in=challenge_hosts_emails)
        & Q(submission__is_baseline=False))

    # Get all the successful submissions related to the challenge phase split
    all_valid_submission_status = [Submission.FINISHED]

    # Handle the case for challenges with partial submission evaluation feature
    if challenge_phase_split.challenge_phase.is_partial_submission_evaluation_enabled:
        all_valid_submission_status.append(Submission.PARTIALLY_EVALUATED)

    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_flagged=False,
        submission__status__in=all_valid_submission_status,
    ).order_by("-created_at")

    leaderboard_data = leaderboard_data.annotate(
        filtering_score=RawSQL("result->>%s", (default_order_by,),
                               output_field=FloatField()),
        filtering_error=RawSQL(
            "error->>%s",
            ("error_{0}".format(default_order_by),),
            output_field=FloatField(),
        ),
    ).values(
        "id",
        "submission__participant_team",
        "submission__participant_team__team_name",
        "submission__participant_team__team_url",
        "submission__is_baseline",
        "submission__is_public",
        "challenge_phase_split",
        "result",
        "error",
        "filtering_score",
        "filtering_error",
        "leaderboard__schema",
        "submission__submitted_at",
        "submission__method_name",
        "submission__id",
        "submission__submission_metadata",
    )

    if only_public_entries:
        if challenge_phase_split.visibility == ChallengePhaseSplit.PUBLIC:
            leaderboard_data = leaderboard_data.filter(submission__is_public=True)

    all_banned_participant_team = []
    for leaderboard_item in leaderboard_data:
        participant_team_id = leaderboard_item["submission__participant_team"]
        participant_team = ParticipantTeam.objects.get(id=participant_team_id)
        all_participants_email_ids = participant_team.get_all_participants_email()
        for participant_email in all_participants_email_ids:
            if participant_email in all_banned_email_ids:
                all_banned_participant_team.append(participant_team_id)
                break
        if leaderboard_item["error"] is None:
            leaderboard_item.update(filtering_error=0)
        if leaderboard_item["filtering_score"] is None:
            leaderboard_item.update(filtering_score=0)

    if challenge_phase_split.show_leaderboard_by_latest_submission:
        sorted_leaderboard_data = leaderboard_data
    else:
        sorted_leaderboard_data = sorted(
            leaderboard_data,
            key=lambda k: (
                float(k["filtering_score"]),
                float(-k["filtering_error"]),
            ),
            reverse=challenge_phase_split.is_leaderboard_order_descending,
        )

    distinct_sorted_leaderboard_data = []
    team_list = []
    for data in sorted_leaderboard_data:
        if (data["submission__participant_team__team_name"] in team_list
                or data["submission__participant_team"] in all_banned_participant_team):
            continue
        elif data["submission__is_baseline"] is True:
            # Baseline submissions are added without reserving the team's
            # single leaderboard slot.
            distinct_sorted_leaderboard_data.append(data)
        else:
            distinct_sorted_leaderboard_data.append(data)
            team_list.append(data["submission__participant_team__team_name"])

    leaderboard_labels = challenge_phase_split.leaderboard.schema["labels"]
    for item in distinct_sorted_leaderboard_data:
        item_result = []
        for index in leaderboard_labels:
            # Handle case for partially evaluated submissions
            if index in item["result"].keys():
                item_result.append(item["result"][index])
            else:
                item_result.append("#")
        item["result"] = item_result

        if item["error"] is not None:
            item["error"] = [
                item["error"]["error_{0}".format(index)]
                for index in leaderboard_labels
            ]
    return distinct_sorted_leaderboard_data, status.HTTP_200_OK