from django.db.models import When, Value

REGULAR_USER = When(
    user__consultant__isnull=True,
    then=Value(True),
)
CONSULTANT_USER = When(
    user__consultant__isnull=False,
    user__consultant__status='A',
    then=Value(True),
)
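# A minimal usage sketch (not from the original source): these When()
# branches only make sense inside a Case() annotation. The model name
# (Account) and annotation name (is_active_user) are hypothetical.
from django.db.models import BooleanField, Case

accounts = Account.objects.annotate(
    is_active_user=Case(
        REGULAR_USER,
        CONSULTANT_USER,
        default=Value(False),
        output_field=BooleanField(),
    ),
)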
def monthly_rate(self, target_date=None):
    return self.active_subscriptions(target_date).aggregate(
        rate=Coalesce(Sum('monthly_rate'), Value(0.00)))['rate']
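# Hedged aside, not from the source: if `monthly_rate` is a DecimalField,
# recent Django versions reject mixing the float Value(0.00) into Coalesce
# ("expression contains mixed types"). Giving the fallback an explicit
# output_field avoids that; Decimal("0.00") is an assumption about the
# field's type.
from decimal import Decimal
from django.db.models import DecimalField, Sum, Value
from django.db.models.functions import Coalesce

rate_expr = Coalesce(
    Sum('monthly_rate'),
    Value(Decimal('0.00'), output_field=DecimalField()),
)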
def with_reference_price_string(self):
    """
    Returns a MeasureCondition queryset annotated with a
    ``reference_price_string`` query expression.

    This expression should evaluate to a valid reference price string
    (https://uktrade.github.io/tariff-data-manual/documentation/data-structures/measure-conditions.html#condition-codes).

    If a condition has no duty_amount value, this expression evaluates to an
    empty string (""). Otherwise it returns the result of three Case()
    expressions chained together:

    The first Case() evaluates to "%" if the condition has a duty amount and
    no monetary unit, else " " followed by the monetary unit code.

    The second evaluates to "" when a condition has no condition_measurement,
    or its measurement has no measurement unit, or the measurement unit has
    no abbreviation; else, if it has no monetary unit, the measurement unit
    abbreviation is returned; else the abbreviation is returned prefixed by
    " / ".

    The third evaluates to "" when the measurement unit qualifier has no
    abbreviation, else the unit qualifier abbreviation is returned prefixed
    by " / ".
    """
    return self.annotate(
        reference_price_string=Case(
            When(
                duty_amount__isnull=True,
                then=Value(""),
            ),
            default=Concat(
                "duty_amount",
                Case(
                    When(
                        monetary_unit=None,
                        duty_amount__isnull=False,
                        then=Value("%"),
                    ),
                    default=Concat(
                        Value(" "),
                        F("monetary_unit__code"),
                    ),
                ),
                Case(
                    When(
                        Q(condition_measurement__isnull=True)
                        | Q(condition_measurement__measurement_unit__isnull=True)
                        | Q(condition_measurement__measurement_unit__abbreviation__isnull=True),
                        then=Value(""),
                    ),
                    When(
                        monetary_unit__isnull=True,
                        then=F("condition_measurement__measurement_unit__abbreviation"),
                    ),
                    default=Concat(
                        Value(" / "),
                        F("condition_measurement__measurement_unit__abbreviation"),
                    ),
                ),
                Case(
                    When(
                        condition_measurement__measurement_unit_qualifier__abbreviation__isnull=True,
                        then=Value(""),
                    ),
                    default=Concat(
                        Value(" / "),
                        F("condition_measurement__measurement_unit_qualifier__abbreviation"),
                    ),
                ),
                output_field=CharField(),
            ),
        ),
    )
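# Illustrative only (the values are invented, not from the source): per the
# chained Case() expressions above, the annotation yields strings shaped like
#   duty amount, no monetary unit                      -> "3.0%"
#   duty amount + monetary unit + unit + qualifier     -> "2.7 EUR / 100 kg / lactic."
# Exact decimal rendering depends on the database backend.
conditions = MeasureCondition.objects.with_reference_price_string()
for condition in conditions:
    print(condition.reference_price_string)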
def update_total(sender, instance, *args, **kwargs):
    total = instance.purchasetotal.aggregate(
        the_sum=Coalesce(Sum('Total_p'), Value(0)))['the_sum']
    instance.Total_Purchase = total
def get_visualisations_data_for_user_matching_query(visualisations: QuerySet,
                                                    query,
                                                    user=None):
    """
    Filters the visualisation queryset for:

    1) visibility (whether the user can know if the visualisation exists)
    2) matches the search terms

    Annotates the visualisation queryset with:

    1) `has_access`, if the user can use the visualisation.
    """
    # Filter out visualisations that the user is not allowed to even know about.
    if not (user and user.has_perm(
            dataset_type_to_manage_unpublished_permission_codename(
                DataSetType.VISUALISATION.value))):
        visualisations = visualisations.filter(published=True)

    # Filter out visualisations that don't match the search terms.
    search = SearchVector('name', weight='A', config='english') + SearchVector(
        'short_description', weight='B', config='english')
    search_query = SearchQuery(query, config='english')
    visualisations = visualisations.annotate(
        search=search, search_rank=SearchRank(search, search_query))
    if query:
        visualisations = visualisations.filter(search=search_query)

    # Mark up whether the user can access the visualisation.
    if user:
        access_filter = (
            Q(user_access_type='REQUIRES_AUTHENTICATION')
            & (Q(visualisationuserpermission__user=user)
               | Q(visualisationuserpermission__isnull=True))
        ) | Q(
            user_access_type='REQUIRES_AUTHORIZATION',
            visualisationuserpermission__user=user,
        )
    else:
        access_filter = Q()
    visualisations = visualisations.annotate(
        _has_access=Case(
            When(access_filter, then=True),
            default=False,
            output_field=BooleanField(),
        ) if access_filter else Value(True, BooleanField()),
    )

    # Pull in the source tag IDs for the dataset.
    visualisations = visualisations.annotate(
        source_tag_ids=Value([], ArrayField(UUIDField())))
    visualisations = visualisations.annotate(
        source_tag_names=Value([], ArrayField(CharField(max_length=256))))

    # Define a `purpose` column denoting the dataset type.
    visualisations = visualisations.annotate(
        purpose=Value(DataSetType.VISUALISATION.value, IntegerField()))

    # We are joining on the user permissions table to determine `_has_access`
    # to the visualisation, so we need to group the rows and remove
    # duplicates. We aggregate all the `_has_access` fields together and
    # return true if any of the records say that access is available.
    visualisations = visualisations.values(
        'id',
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'purpose',
        'published',
        'published_at',
    ).annotate(has_access=BoolOr('_has_access'))

    return visualisations.values(
        'id',
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'purpose',
        'published',
        'published_at',
        'has_access',
    )
def __init__(self, expr):
    super().__init__(expr)
    expr = self.source_expressions[0]
    if isinstance(expr, Value) and not expr._output_field_or_none:
        self.source_expressions[0] = Value(
            expr.value, output_field=RasterField(srid=expr.value.srid))
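# Context sketch under assumptions (not the source project's code): this
# __init__ appears to belong to a GeoDjango-style raster Func, coercing a
# bare Value holding a GDALRaster (which carries an .srid) onto a
# RasterField so the backend can serialise it. Hypothetical demonstration:
from django.contrib.gis.db.models import RasterField
from django.contrib.gis.gdal import GDALRaster
from django.db.models import Value

rast = GDALRaster({'srid': 4326, 'width': 1, 'height': 1,
                   'bands': [{'data': [0]}]})
value = Value(rast)
# A raster Value has no inferrable output_field on its own...
assert value._output_field_or_none is None
# ...so the __init__ above replaces it with
# Value(rast, output_field=RasterField(srid=4326)).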
def updateFoodPrice(cls, menu_id, category_products, centers, start, end,
                    price, category, user, tracking_id):
    # Get all historical price records
    separator = '---'
    old_prices = FoodPrice.objects \
        .annotate(
            category_product=Concat('category', Value(separator), 'product')
        ) \
        .filter(
            menu=menu_id,
            center_id__in=centers,
            status='active',
            category_product__in=[
                '{}{}{}'.format(category, separator, product)
                for category, product in category_products
            ]
        )
    if start and end:
        old_prices = old_prices.exclude(Q(start__gt=end) | Q(end__lt=start))
    elif start:
        old_prices = old_prices.exclude(Q(end__lt=start))
    elif end:
        old_prices = old_prices.exclude(Q(start__gt=end))
    old_prices_records = pd.DataFrame.from_records(old_prices.values())

    # Add new prices
    price_records, num = cls.getFoodByCenter(menu_id, start, category, None,
                                             None, centers, onlyPrice=True)
    if not price_records.empty:
        price_records = price_records \
            .sort_values(['center_id', 'menu', 'category', 'product_num',
                          'action_time']) \
            .drop_duplicates(['center_id', 'menu', 'category', 'product_num'],
                             keep='last')
        # Filter by selections
        price_records['category_product'] = price_records[
            'category'] + '---' + price_records['product_num']
        price_records = price_records[
            price_records['category_product'].isin([
                '{}---{}'.format(category, product)
                for category, product in category_products
            ])]
    else:
        price_records = pd.DataFrame(
            columns=['center_id', 'menu_id', 'category', 'product_num'])

    food_menu = Menu.objects.get(menu_id=menu_id)
    for center in centers:
        center_obj = Centers.objects.get(center_id=center)
        for category_product in category_products:
            category, product_id = category_product
            product = Product.objects.get(product_id=product_id)
            row = price_records[
                (price_records['center_id'] == str(center))
                & (price_records['menu_id'] == int(menu_id))
                & (price_records['category'] == category)
                & (price_records['product_num'] == product_id)]
            if row.empty:
                price_old = None
                price_new = cls.price_converter(None, price['price_symbol'],
                                                price['price'], price['unit'])
            else:
                price_old = row['price'].values[0]
                price_new = cls.price_converter(price_old,
                                                price['price_symbol'],
                                                price['price'], price['unit'])
            tracking_obj = Tracking.objects.get(tracking_id=tracking_id)
            FoodPrice.objects.create(product=product,
                                     center_id=center_obj,
                                     menu=food_menu,
                                     category=category,
                                     price=price_new,
                                     start=start,
                                     end=end,
                                     action_user=user,
                                     tracking_id=tracking_obj)
            # Tracking Change Report
            description = ('Change food "{food}" in menu "{menu}" category '
                           '"{category}" price from "${price_old}" to '
                           '"${price_new}"').format(food=product_id,
                                                    menu=food_menu.menu_name,
                                                    category=category,
                                                    price_old=price_old,
                                                    price_new=price_new)
            FoodChangeReport.objects.create(tracking_id=tracking_obj,
                                            username=user,
                                            center_id=center_obj,
                                            product_id=product,
                                            category=category,
                                            menu_id2=food_menu,
                                            description=description,
                                            price_old=price_old,
                                            price_new=price_new,
                                            product_start=start,
                                            product_end=end)

    # Revise old prices
    for index, row in old_prices_records.iterrows():
        start_old, end_old = row['start'], row['end']
        start_old = None if str(
            start_old) == 'NaT' or not start_old else start_old.date()
        end_old = None if str(
            end_old) == 'NaT' or not end_old else end_old.date()
        price_obj = FoodPrice.objects.get(id=row['id'])
        if start_old and not end_old and start and not end:
            if start_old >= start:
                price_obj.status = 'inactive'
            else:
                # Close the old open-ended price the day before the new
                # price starts (`end` is None on this branch).
                price_obj.end = max(start - td(days=1), start_old)
            price_obj.save()
        elif start_old and end_old and start and end:
            if start <= start_old <= end_old <= end:
                price_obj.status = 'inactive'
            elif start <= start_old <= end <= end_old:
                price_obj.start = min(end + td(days=1), end_old)
            elif start_old <= start <= end_old <= end:
                price_obj.end = max(start - td(days=1), start_old)
            price_obj.save()
        elif start_old and not end_old and start and end:
            if start <= start_old <= end:
                price_obj.start = end + td(days=1)
            price_obj.save()
def test_create_func(self):
    instance = ModelWithVanillaMoneyField.objects.create(
        money=Func(Value(-10), function='ABS'))
    instance.refresh_from_db()
    assert instance.money.amount == 10
def test_value_create(self, value, expected):
    instance = NullMoneyFieldModel.objects.create(field=Value(value))
    instance.refresh_from_db()
    assert instance.field == expected
def export(self, request):
    """Export filtered reports to given file format."""
    queryset = self.get_queryset().select_related(
        "task__project__billing_type",
        "task__cost_center",
        "task__project__cost_center",
    )
    queryset = self.filter_queryset(queryset)
    queryset = queryset.annotate(
        cost_center=Case(
            # Task cost center has precedence over project cost center
            When(
                task__cost_center__isnull=False,
                then=F("task__cost_center__name"),
            ),
            When(
                task__project__cost_center__isnull=False,
                then=F("task__project__cost_center__name"),
            ),
            default=Value(""),
            output_field=CharField(),
        )
    )
    queryset = queryset.annotate(
        billing_type=Case(
            When(
                task__project__billing_type__isnull=False,
                then=F("task__project__billing_type__name"),
            ),
            default=Value(""),
            output_field=CharField(),
        )
    )

    if (
        settings.REPORTS_EXPORT_MAX_COUNT > 0
        and queryset.count() > settings.REPORTS_EXPORT_MAX_COUNT
    ):
        # Keep the format arguments outside of gettext so the message
        # template stays translatable.
        return Response(
            _(
                "Your request exceeds the maximum allowed entries ({0} > {1})"
            ).format(queryset.count(), settings.REPORTS_EXPORT_MAX_COUNT),
            status=status.HTTP_400_BAD_REQUEST,
        )

    colnames = [
        "Date",
        "Duration",
        "Customer",
        "Project",
        "Task",
        "User",
        "Comment",
        "Billing Type",
        "Cost Center",
    ]

    content = queryset.values_list(
        "date",
        "duration",
        "task__project__customer__name",
        "task__project__name",
        "task__name",
        "user__username",
        "comment",
        "billing_type",
        "cost_center",
    )

    file_type = request.query_params.get("file_type")
    if file_type not in ["csv", "xlsx", "ods"]:
        return HttpResponseBadRequest()

    sheet = django_excel.pe.Sheet(content, name="Report", colnames=colnames)
    return django_excel.make_response(
        sheet, file_type=file_type, file_name="report.%s" % file_type
    )
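# Usage sketch (the endpoint path and user fixture are assumptions, not from
# the source): the action returns the filtered reports as a spreadsheet in
# the requested format.
from rest_framework.test import APIClient

client = APIClient()
client.force_authenticate(user=some_user)  # hypothetical user fixture
response = client.get("/api/v1/reports/export", {"file_type": "csv"})
assert response.status_code == 200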
def get_channel_data(channel, site, default_thumbnail=None):
    import time
    start = time.time()
    print("Starting " + channel.name)

    data = {
        "name": channel.name,
        "id": channel.id,
        "public": "Yes" if channel.public else "No",
        "description": channel.description,
        "language": channel.language and channel.language.readable_name,
        "generated_thumbnail": default_thumbnail is not None
                               and generate_thumbnail(channel)
                               or default_thumbnail,
        "url": "http://{}/channels/{}/edit".format(site, channel.id),
    }

    # Get information related to channel
    tokens = channel.secret_tokens.values_list('token', flat=True)
    data["tokens"] = ", ".join(
        ["{}-{}".format(t[:5], t[5:]) for t in tokens if t != channel.id])
    data["editors"] = ", ".join(list(
        channel.editors.annotate(
            name=Concat('first_name', Value(' '), 'last_name', Value(' ('),
                        'email', Value(')'), output_field=CharField()))
        .values_list('name', flat=True)))
    data["tags"] = ", ".join(
        channel.tags.exclude(tag_name=None)
        .values_list('tag_name', flat=True).distinct())

    # Get information related to nodes
    nodes = channel.main_tree.get_descendants() \
        .select_related('parent', 'language', 'kind') \
        .prefetch_related('files')

    # Get sample path at longest path
    max_level = nodes.aggregate(max_level=Max('level'))['max_level']
    deepest_node = nodes.filter(level=max_level).first()
    if deepest_node:
        pathway = deepest_node.get_ancestors(include_self=True) \
            .exclude(pk=channel.main_tree.pk) \
            .annotate(name=Concat('title', Value(' ('), 'kind_id',
                                  Value(')'), output_field=CharField())) \
            .values_list('name', flat=True)
        data["sample_pathway"] = " -> ".join(pathway)
    else:
        data["sample_pathway"] = "Channel is empty"

    # Get language information
    node_languages = nodes.exclude(language=None).values_list(
        'language__readable_name', flat=True).distinct()
    file_languages = nodes.exclude(files__language=None).values_list(
        'files__language__readable_name', flat=True)
    language_list = list(set(chain(node_languages, file_languages)))
    language_list = [l for l in language_list
                     if l is not None and l != data['language']]
    language_list = sorted(l.replace(",", " -") for l in language_list)
    data["languages"] = ", ".join(language_list)

    # Get kind information
    kind_list = nodes.values('kind_id') \
        .annotate(count=Count('kind_id')) \
        .order_by('kind_id')
    data["kind_counts"] = ", ".join(
        [pluralize_kind(k['kind_id'], k['count']) for k in kind_list])

    # Get file size
    data["total_size"] = sizeof_fmt(
        nodes.exclude(kind_id=content_kinds.EXERCISE, published=False)
        .values('files__checksum', 'files__file_size')
        .distinct()
        .aggregate(size=Sum('files__file_size'))['size'] or 0)

    print(channel.name + " time:", time.time() - start)
    return data
def test_filter_annotation(self):
    books = Book.objects.annotate(
        is_book=Value(1, output_field=IntegerField())).filter(is_book=1)
    for book in books:
        self.assertEqual(book.is_book, 1)
def test_delete_subfunc(self):
    say_sub = ConcatWS(Value('s'), Value('ub'), separator='')
    DynamicModel.objects.update(attrs=ColumnDelete('attrs', say_sub))
    m = DynamicModel.objects.get()
    assert m.attrs == {'flote': 1.0}
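# Context (an assumption: ConcatWS and ColumnDelete here look like
# django-mysql's dynamic-column functions): ConcatWS builds the key name
# "sub" in SQL, and ColumnDelete drops that key from the MariaDB dynamic
# column. The equivalent call with a static key name would be:
from django_mysql.models.functions import ColumnDelete

DynamicModel.objects.update(attrs=ColumnDelete('attrs', 'sub'))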
def reverse_func(apps, schema_editor):
    PreprintContributor = apps.get_model('osf', 'PreprintContributor')
    PreprintTags = apps.get_model('osf', 'Preprint_Tags')
    NodeSettings = apps.get_model('addons_osfstorage', 'NodeSettings')
    AbstractNode = apps.get_model('osf', 'AbstractNode')
    Preprint = apps.get_model('osf', 'Preprint')
    BaseFileNode = apps.get_model('osf', 'BaseFileNode')
    PageCounter = apps.get_model('osf', 'PageCounter')
    Guid = apps.get_model('osf', 'Guid')

    preprints = []
    files = []
    nodes = []

    modified_field = Preprint._meta.get_field('modified')
    modified_field.auto_now = False
    node_modified_field = AbstractNode._meta.get_field('modified')
    node_modified_field.auto_now = False

    for preprint in Preprint.objects.filter(
            node__isnull=False).select_related('node'):
        node = preprint.node
        preprint.title = 'Untitled'
        preprint.description = ''
        preprint.creator = None
        preprint.article_doi = ''
        preprint.is_public = True
        preprint.region_id = None
        preprint.spam_status = None
        preprint.spam_pro_tip = ''
        preprint.spam_data = {}
        preprint.date_last_reported = None
        preprint.reports = {}
        preprint_file = None
        if preprint.primary_file:
            preprint_file = BaseFileNode.objects.get(
                id=preprint.primary_file.id)
            preprint_file.target_object_id = node.id
            preprint_file.target_content_type_id = ContentType.objects.get_for_model(
                AbstractNode).id
            preprint_file.parent_id = NodeSettings.objects.get(
                owner_id=node.id).root_node_id
            node_id = Guid.objects.get(
                content_type=ContentType.objects.get_for_model(
                    AbstractNode).id,
                object_id=node.id)._id
            preprint_id = Guid.objects.get(
                content_type=ContentType.objects.get_for_model(Preprint).id,
                object_id=preprint.id)._id
            PageCounter.objects.filter(
                Q(_id__contains=preprint_id)
                & Q(_id__contains=preprint_file._id)).update(
                    _id=Func(F('_id'), Value(preprint_id), Value(node_id),
                             function='replace'))
        node.preprint_file = preprint_file
        preprint.primary_file = None
        preprint.deleted = None
        preprint.migrated = None
        preprints.append(preprint)
        nodes.append(node)
        files.append(preprint_file)

        # Deleting the particular preprint admin/read/write groups will
        # remove the users from the groups and their permission to these
        # preprints.
        Group.objects.get(name=format_group(preprint, 'admin')).delete()
        Group.objects.get(name=format_group(preprint, 'write')).delete()
        Group.objects.get(name=format_group(preprint, 'read')).delete()

    PreprintContributor.objects.all().delete()
    PreprintTags.objects.all().delete()

    bulk_update(preprints, update_fields=[
        'title', 'description', 'creator', 'article_doi', 'is_public',
        'region_id', 'deleted', 'migrated', 'modified', 'primary_file',
        'spam_status', 'spam_pro_tip', 'spam_data', 'date_last_reported',
        'reports'
    ])
    bulk_update(nodes, update_fields=['preprint_file'])
    bulk_update(files)

    # Order is important - remove the preprint root folders after the files
    # have been saved back onto the nodes.
    BaseFileNode.objects.filter(
        type='osf.osfstoragefolder',
        is_root=True,
        target_content_type_id=ContentType.objects.get_for_model(
            Preprint).id).delete()

    modified_field.auto_now = True
    node_modified_field.auto_now = True
def handle(self, *args, **options):
    verbosity = options['verbosity']
    if len(list(connections)) > 1:
        raise NotImplementedError(
            "This management command does not support multiple-database "
            "configurations"
        )
    connection = connections['default']
    if connection.Database.__name__ != 'psycopg2':
        raise NotImplementedError(
            "Only the `psycopg2` database backend is supported")

    instances = Instance.objects.all().order_by('pk')
    xforms = XForm.objects.all()
    for option in (
        'pk__gte', 'xform__id_string', 'xform__user__username',
        'xml__contains'
    ):
        if options[option]:
            instances = instances.filter(**{option: options[option]})
            if option.startswith('xform__'):
                xforms = xforms.filter(
                    **{option[len('xform__'):]: options[option]}
                )

    instances = instances.annotate(
        root_node_name=Func(
            F('xml'),
            Value(INSTANCE_ROOT_NODE_NAME_PATTERN),
            function='regexp_matches'
        )
    ).values_list('pk', 'xform_id', 'root_node_name')
    if not instances.exists():
        self.stderr.write('No Instances found.')
        return

    t0 = time.time()
    self.stderr.write(
        'Fetching Instances; please allow several minutes...', ending='')
    instances = list(instances)
    self.stderr.write(
        'got {} in {} seconds.'.format(
            len(instances), int(time.time() - t0)
        )
    )

    # Getting the XForm root node names separately is far more efficient
    # than calling `regexp_matches` on `xform__xml` in the `Instance` query.
    xforms = xforms.annotate(
        root_node_name=Func(
            F('xml'),
            Value(XFORM_ROOT_NODE_NAME_PATTERN),
            function='regexp_matches'
        )
    ).values_list('pk', 'root_node_name')
    self.stderr.write('Fetching XForm root node names...', ending='')
    t0 = time.time()
    xform_root_node_names = dict(xforms)
    self.stderr.write(
        'got {} in {} seconds.'.format(
            len(xform_root_node_names), int(time.time() - t0)
        )
    )

    completed_instances = 0
    changed_instances = 0
    failed_instances = 0
    progress_interval = 1  # second
    t0 = time.time()
    t_last = t0
    self.stdout.write(
        'Instance\tXForm\tOld Root Node Name\tNew Root Node Name')
    for instance in instances:
        t_now = time.time()
        if (verbosity > 1 and t_now - t_last >= progress_interval
                and completed_instances):
            t_last = t_now
            t_elapsed = t_now - t0
            write_same_line(
                self.stderr,
                'Completed {} Instances: {} changed, {} failed; '
                '{}s elapsed, {} Instance/sec.'.format(
                    completed_instances, changed_instances,
                    failed_instances, int(t_elapsed),
                    int(completed_instances / t_elapsed)
                )
            )
        instance_id = instance[0]
        xform_id = instance[1]
        # `regexp_matches` results come back as `list`s from the ORM
        instance_root_node_name = instance[2]
        xform_root_node_name = xform_root_node_names[xform_id]
        if not len(instance_root_node_name) == 1:
            self.stderr.write(
                '!!! Failed to get root node name for Instance {}'.format(
                    instance_id)
            )
            failed_instances += 1
            completed_instances += 1
            continue
        if not len(xform_root_node_name) == 1:
            self.stderr.write(
                '!!! Failed to get root node name for XForm {}'.format(
                    xform_id)
            )
            failed_instances += 1
            completed_instances += 1
            continue
        instance_root_node_name = instance_root_node_name[0]
        xform_root_node_name = xform_root_node_name[0]
        if instance_root_node_name == xform_root_node_name:
            completed_instances += 1
            continue
        queryset = Instance.objects.filter(pk=instance_id).only('xml')
        fixed_xml = replace_first_and_last(
            queryset[0].xml, instance_root_node_name, xform_root_node_name)
        new_xml_hash = Instance.get_hash(fixed_xml)
        queryset.update(xml=fixed_xml, xml_hash=new_xml_hash)
        self.stdout.write('{}\t{}\t{}\t{}'.format(
            instance_id, xform_id, instance_root_node_name,
            xform_root_node_name
        ))
        changed_instances += 1
        completed_instances += 1

    self.stderr.write(
        '\nFinished {} Instances: {} changed, {} failed.'.format(
            completed_instances, changed_instances, failed_instances
        )
    )
    self.stdout.write(
        'At the start of processing, the last instance PK '
        'was {}.'.format(instance_id)
    )
def test_value_create_invalid(self):
    with pytest.raises(ValidationError):
        ModelWithVanillaMoneyField.objects.create(money=Value('string'))
def reverse_func(apps, schema_editor):  # pragma: no cover
    icon = apps.get_model('djangocms_icon', 'Icon')
    icon.objects.filter(icon__startswith='el ').update(icon=Func(
        F('icon'),
        Value('el '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='flag-icon ').update(icon=Func(
        F('icon'),
        Value('flag-icon '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='fa ').update(icon=Func(
        F('icon'),
        Value('fa '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='glyphicon ').update(icon=Func(
        F('icon'),
        Value('glyphicon '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='map-icon ').update(icon=Func(
        F('icon'),
        Value('map-icon '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='zmdi ').update(icon=Func(
        F('icon'),
        Value('zmdi '),
        Value(''),
        function='replace',
    ))
    icon.objects.filter(icon__startswith='wi ').update(icon=Func(
        F('icon'),
        Value('wi '),
        Value(''),
        function='replace',
    ))
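# Design note (a sketch, not the project's migration): the seven
# near-identical updates above could be collapsed into one loop with the
# same behavior, stripping each vendor prefix via SQL replace().
for prefix in ('el ', 'flag-icon ', 'fa ', 'glyphicon ', 'map-icon ',
               'zmdi ', 'wi '):
    icon.objects.filter(icon__startswith=prefix).update(icon=Func(
        F('icon'), Value(prefix), Value(''), function='replace'))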
def stats(self, profile_uid=None, minutes=None, server_id=None):
    game_case = Case(
        When(game="bj", then=Value("blackjack")),
        When(game="cf", then=Value("coinflip")),
        default="game",
    )
    qs = self.get_queryset()
    if profile_uid:
        qs = qs.filter(profile_id=profile_uid)
    if minutes:
        after = datetime.datetime.now(tz=datetime.timezone.utc) - \
            datetime.timedelta(minutes=minutes)
        qs = qs.filter(created__gt=after)
    if server_id:
        qs = qs.filter(profile__server_id=server_id)

    earnings = (
        qs.values("game")
        .order_by("game")
        .annotate(
            g=game_case,
            big_win=Max(
                Case(
                    When(net__gt=0, then="net"),
                    default=0,
                )
            ),
            big_loss=Min(
                Case(
                    When(net__lt=0, then="net"),
                    default=0,
                )
            ),
            total=Sum("net"),
        )
    )
    earnings_results = earnings.values("g", "big_win", "big_loss", "total")
    if not earnings_results:
        return (("No Results", "No games could be found."),)

    game_col_size, min_col_size = 15, 8
    win_col_size = max(max([len(str(f"{r['big_win']:,}")) for r in earnings_results]), min_col_size)
    loss_col_size = max(max([len(str(f"{r['big_loss']:,}")) for r in earnings_results]), min_col_size)
    total_col_size = max(max([len(str(f"{r['total']:,}")) for r in earnings_results]), min_col_size)
    biggest_net, lifetime = (
        f"{'Game':<{game_col_size}} {'Big Win':>{win_col_size}} {'Big Loss':>{loss_col_size}}\n",
        "",
    )
    t = SimpleNamespace(big_win=0, big_loss=0, total=0)
    for game in earnings_results:
        g = SimpleNamespace(**game)
        biggest_net += f"{g.g:{game_col_size}} ==> {g.big_win:{win_col_size},} {g.big_loss:{loss_col_size},}\n"
        lifetime += f"{g.g:{game_col_size}} ==> {g.total:{total_col_size},}\n"
        t.big_win, t.big_loss, t.total = t.big_win + g.big_win, t.big_loss + g.big_loss, t.total + g.total
    biggest_net = f"```\n{biggest_net}{'Total':{game_col_size}} ==> {t.big_win:{win_col_size},} {t.big_loss:{loss_col_size},}\n```"
    lifetime = f"```\n{lifetime}{'Total':{game_col_size}} ==> {t.total:{total_col_size},}```"

    games_played = (
        qs.values("game")
        .order_by("game")
        .annotate(
            g=game_case,
            won=Sum(
                Case(
                    When(outcome="won", then=1),
                    default=0,
                    output_field=models.IntegerField(),
                )
            ),
            lost=Sum(
                Case(
                    When(outcome="lost", then=1),
                    default=0,
                    output_field=models.IntegerField(),
                )
            ),
            tied=Sum(
                Case(
                    When(outcome="tied", then=1),
                    default=0,
                    output_field=models.IntegerField(),
                )
            ),
            total=Count("net"),
        )
    )
    games_played_results = games_played.values("g", "won", "lost", "tied", "total")
    game_col_size, min_col_size = 15, 6
    win_col_size = max(max([len(str(f"{r['won']:,}")) for r in games_played_results]), min_col_size)
    loss_col_size = max(max([len(str(f"{r['lost']:,}")) for r in games_played_results]), min_col_size)
    tied_col_size = max(max([len(str(f"{r['tied']:,}")) for r in games_played_results]), min_col_size)
    total_col_size = max(max([len(str(f"{r['total']:,}")) for r in games_played_results]), min_col_size)
    t = SimpleNamespace(wins=0, losses=0, ties=0, total=0)
    games_played = f"{'Game':<{game_col_size}} {'Wins':>{win_col_size}} {'Losses':>{loss_col_size}} {'Ties':>{tied_col_size}} {'Total':>{total_col_size}}\n"
    for game in games_played_results:
        g = SimpleNamespace(**game)
        games_played += f"{g.g:{game_col_size}} ==> {g.won:{win_col_size},} {g.lost:{loss_col_size},} {g.tied:{tied_col_size},} {g.total:{total_col_size},}\n"
        t.wins, t.losses, t.ties, t.total = t.wins + g.won, t.losses + g.lost, t.ties + g.tied, t.total + g.total
    games_played = f"```\n{games_played}{'Total':{game_col_size}} ==> {t.wins:{win_col_size},} {t.losses:{loss_col_size},} {t.ties:{tied_col_size},} {t.total:{total_col_size},}\n```"

    return (
        ("Games Played", games_played),
        ("Biggest Net", biggest_net),
        ("Lifetime Winnings", lifetime),
    )
def all_routes(self, request, *args, **kwargs):
    queryset = Route.objects.all().annotate(
        type=Case(When(Q(driver__isnull=False), then=Value('driver')),
                  output_field=CharField(),
                  default=Value('passenger'))).filter(type='driver')
    serializer = self.get_serializer(queryset, many=True)
    return Response(serializer.data)
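# Design note (a sketch, not the project's code): annotating `type` and then
# immediately filtering on it is equivalent here to filtering directly,
# unless the annotation is also needed in the serialized output.
queryset = Route.objects.filter(driver__isnull=False)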
def annotate_predictions_results(queryset):
    if settings.DJANGO_DB == settings.DJANGO_DB_SQLITE:
        return queryset.annotate(predictions_results=Coalesce(
            GroupConcat("predictions__result"), Value(''),
            output_field=models.CharField()))
    else:
        return queryset.annotate(
            predictions_results=ArrayAgg("predictions__result",
                                         distinct=True))
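# Caller-facing caveat (an observation, not from the source): the two
# branches produce different Python types — a concatenated string on SQLite
# and a list on Postgres — so downstream code has to normalise. A hedged
# sketch, assuming GroupConcat's default comma separator:
def predictions_results_as_list(obj):
    value = obj.predictions_results
    if isinstance(value, str):  # SQLite / GroupConcat branch
        return [v for v in value.split(',') if v]
    return value or []  # Postgres / ArrayAgg branch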
def handle(self, *args, form_slug, **options):
    now = timezone.now()
    form = PersonForm.objects.get(slug=form_slug)
    assemblee_circo = EventSubtype.objects.get(
        label="assemblee-circonscription")

    submissions = form.submissions.annotate(
        evenement_cree=Exists(
            Event.objects.annotate(submission_id=Cast(
                Func(
                    F("meta"),
                    Value("submission_id"),
                    function="jsonb_extract_path_text",
                ),
                output_field=IntegerField(),
            )).filter(submission_id=OuterRef("id"))),
        possible_date=Cast(
            Func(
                F("data"),
                Value("date"),
                function="jsonb_extract_path_text",
            ),
            output_field=DateTimeField(),
        ),
    ).filter(evenement_cree=False, possible_date__gt=now)

    logger.info(f"Création de {submissions.count()} nouveaux événements")

    # Create the corresponding events
    for s in submissions:
        name = f'Assemblée de circonscription à {s.data["location_city"]}'
        date = parse_date(s.data["date"])
        try:
            group = SupportGroup.objects.get(id=s.data["premier_groupe"])
        except SupportGroup.DoesNotExist:
            group = None

        logger.debug(f"Création événement « {name} »")

        with transaction.atomic():
            event = Event.objects.create(
                name=name[:Event._meta.get_field("name").max_length],
                visibility=Event.VISIBILITY_ORGANIZER,
                subtype=assemblee_circo,
                start_time=date,
                end_time=date + timedelta(hours=3),
                **{
                    k: v[:Event._meta.get_field(k).max_length]
                    for k, v in s.data.items()
                    if k in [
                        "location_name",
                        "location_address1",
                        "location_zip",
                        "location_city",
                    ]
                },
                description=description_from_submission(s),
                meta={"submission_id": s.id},
            )
            OrganizerConfig.objects.create(event=event,
                                           person=s.person,
                                           as_group=group)
            geocode_event.delay(event.pk)
def annotate_annotators(queryset):
    if settings.DJANGO_DB == settings.DJANGO_DB_SQLITE:
        return queryset.annotate(annotators=Coalesce(
            GroupConcat("annotations__completed_by"), Value(''),
            output_field=models.CharField()))
    else:
        return queryset.annotate(
            annotators=ArrayAgg("annotations__completed_by", distinct=True))
def update_total_sales(sender, instance, *args, **kwargs):
    total1 = instance.saletotal.aggregate(
        the_sum=Coalesce(Sum('Total'), Value(0)))['the_sum']
    instance.Total_Amount = total1
def get_queryset(self):
    if self.request.user.is_staff and self.request.user.should_deny_admin():
        return Category.objects.none()
    team = self.request.user.team
    if team is not None:
        solves = Solve.objects.filter(team=team, correct=True)
        solved_challenges = solves.values_list('challenge')
        challenges = Challenge.objects.prefetch_related(
            'unlocked_by').annotate(
                unlocked=Case(
                    When(auto_unlock=True, then=Value(True)),
                    When(Q(unlocked_by__in=Subquery(solved_challenges)),
                         then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField()),
                solved=Case(
                    When(Q(id__in=Subquery(solved_challenges)),
                         then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField()),
                solve_count=Count('solves', filter=Q(solves__correct=True)),
                unlock_time_surpassed=Case(
                    When(release_time__lte=timezone.now(), then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField(),
                ))
    else:
        challenges = (Challenge.objects.filter(
            release_time__lte=timezone.now()).annotate(
                unlocked=Case(
                    When(auto_unlock=True, then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField()),
                solved=Value(False, models.BooleanField()),
                solve_count=Count('solves'),
                unlock_time_surpassed=Case(
                    When(release_time__lte=timezone.now(), then=Value(True)),
                    default=Value(False),
                    output_field=models.BooleanField(),
                )))
    x = challenges.prefetch_related(
        Prefetch('hint_set',
                 queryset=Hint.objects.annotate(
                     used=Case(
                         When(id__in=HintUse.objects.filter(
                             team=team).values_list('hint_id'),
                              then=Value(True)),
                         default=Value(False),
                         output_field=models.BooleanField())),
                 to_attr='hints'),
        Prefetch('file_set', queryset=File.objects.all(), to_attr='files'),
        Prefetch('tag_set',
                 queryset=Tag.objects.all()
                 if time.time() > config.get('end_time')
                 else Tag.objects.filter(post_competition=False),
                 to_attr='tags'),
        'unlocks', 'first_blood', 'hint_set__uses')
    if self.request.user.is_staff:
        categories = Category.objects
    else:
        categories = Category.objects.filter(
            release_time__lte=timezone.now())
    qs = categories.prefetch_related(
        Prefetch('category_challenges', queryset=x, to_attr='challenges'))
    return qs
def get_datasets_data_for_user_matching_query(datasets: QuerySet,
                                              query,
                                              user=None,
                                              id_field='id'):
    """
    Filters the dataset queryset for:

    1) visibility (whether the user can know if the dataset exists)
    2) matches the search terms

    Annotates the dataset queryset with:

    1) `has_access`, if the user can use the dataset's data.
    """
    is_reference_query = datasets.model is ReferenceDataset

    # Filter out datasets that the user is not allowed to even know about.
    visibility_filter = Q(published=True)
    if user:
        if is_reference_query:
            reference_type = DataSetType.REFERENCE.value
            reference_perm = dataset_type_to_manage_unpublished_permission_codename(
                reference_type)
            if user.has_perm(reference_perm):
                visibility_filter |= Q(published=False)

        if datasets.model is DataSet:
            master_type, datacut_type = (
                DataSetType.MASTER.value,
                DataSetType.DATACUT.value,
            )
            master_perm = dataset_type_to_manage_unpublished_permission_codename(
                master_type)
            datacut_perm = dataset_type_to_manage_unpublished_permission_codename(
                datacut_type)
            if user.has_perm(master_perm):
                visibility_filter |= Q(published=False, type=master_type)
            if user.has_perm(datacut_perm):
                visibility_filter |= Q(published=False, type=datacut_type)

    datasets = datasets.filter(visibility_filter)

    # Filter out datasets that don't match the search terms.
    search = (SearchVector('name', weight='A', config='english')
              + SearchVector('short_description', weight='B',
                             config='english')
              + SearchVector(StringAgg('tags__name', delimiter='\n'),
                             weight='B', config='english'))
    search_query = SearchQuery(query, config='english')
    datasets = datasets.annotate(search=search,
                                 search_rank=SearchRank(search, search_query))
    if query:
        datasets = datasets.filter(search=search_query)

    # Mark up whether the user can access the data in the dataset.
    access_filter = Q()
    if user and datasets.model is not ReferenceDataset:
        access_filter &= (
            Q(user_access_type='REQUIRES_AUTHENTICATION')
            & (Q(datasetuserpermission__user=user)
               | Q(datasetuserpermission__isnull=True))
        ) | Q(
            user_access_type='REQUIRES_AUTHORIZATION',
            datasetuserpermission__user=user,
        )
    datasets = datasets.annotate(
        _has_access=Case(
            When(access_filter, then=True),
            default=False,
            output_field=BooleanField(),
        ) if access_filter else Value(True, BooleanField()),
    )

    # Pull in the source tag IDs for the dataset.
    datasets = datasets.annotate(source_tag_ids=ArrayAgg(
        'tags', filter=Q(tags__type=TagType.SOURCE.value), distinct=True))
    datasets = datasets.annotate(source_tag_names=ArrayAgg(
        'tags__name', filter=Q(tags__type=TagType.SOURCE.value),
        distinct=True))

    # Define a `purpose` column denoting the dataset type.
    if is_reference_query:
        datasets = datasets.annotate(
            purpose=Value(DataSetType.REFERENCE.value, IntegerField()))
    else:
        datasets = datasets.annotate(purpose=F('type'))

    # We are joining on the user permissions table to determine `_has_access`
    # to the dataset, so we need to group the rows and remove duplicates. We
    # aggregate all the `_has_access` fields together and return true if any
    # of the records say that access is available.
    datasets = datasets.values(
        id_field,
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'purpose',
        'published',
        'published_at',
    ).annotate(has_access=BoolOr('_has_access'))

    return datasets.values(
        id_field,
        'name',
        'slug',
        'short_description',
        'search_rank',
        'source_tag_names',
        'source_tag_ids',
        'purpose',
        'published',
        'published_at',
        'has_access',
    )
def list_assets_view(request):
    # Check sorting options
    allowed_sort_options = [
        "id", "name", "criticity_num", "score", "type", "updated_at",
        "risk_level", "risk_level__grade", "-id", "-name", "-criticity_num",
        "-score", "-type", "-updated_at", "-risk_level",
        "-risk_level__grade"
    ]
    sort_options = request.GET.get("sort", "-updated_at")
    sort_options_valid = []
    for s in sort_options.split(","):
        if s in allowed_sort_options and s not in sort_options_valid:
            sort_options_valid.append(str(s))

    # Check filtering options
    filter_options = request.GET.get("filter", "")

    # Todo: filter on fields
    allowed_filter_fields = ["id", "name", "criticity", "type", "score"]
    filter_criterias = filter_options.split(" ")
    filter_fields = {}
    filter_opts = ""
    for criteria in filter_criterias:
        field = criteria.split(":")
        if len(field) > 1 and field[0] in allowed_filter_fields:
            # allowed field
            if field[0] == "score":
                filter_fields.update({"risk_level__grade": field[1]})
            else:
                filter_fields.update({str(field[0]): field[1]})
        else:
            filter_opts = filter_opts + str(criteria.strip())

    # Query
    assets_list = Asset.objects.filter(**filter_fields).filter(
        Q(value__icontains=filter_opts)
        | Q(name__icontains=filter_opts)
        | Q(description__icontains=filter_opts)).annotate(
            criticity_num=Case(
                When(criticity="high", then=Value("1")),
                When(criticity="medium", then=Value("2")),
                When(criticity="low", then=Value("3")),
                default=Value("1"),
                output_field=CharField())).annotate(
                    cat_list=ArrayAgg('categories__value')).order_by(
                        *sort_options_valid)

    # Paginate assets
    nb_rows = int(request.GET.get('n', 16))
    assets_paginator = Paginator(assets_list, nb_rows)
    page = request.GET.get('page')
    try:
        assets = assets_paginator.page(page)
    except PageNotAnInteger:
        assets = assets_paginator.page(1)
    except EmptyPage:
        assets = assets_paginator.page(assets_paginator.num_pages)

    # List asset groups
    asset_groups = []
    ags = AssetGroup.objects.all().annotate(
        asset_list=ArrayAgg('assets__value')).only(
            "id", "name", "assets", "criticity", "updated_at", "risk_level")
    for asset_group in ags:
        assets_names = ""
        if asset_group.asset_list != [None]:
            assets_names = ", ".join(asset_group.asset_list)
        ag = {
            "id": asset_group.id,
            "name": asset_group.name,
            "criticity": asset_group.criticity,
            "updated_at": asset_group.updated_at,
            "assets_names": assets_names,
            "risk_grade": asset_group.risk_level['grade']
        }
        asset_groups.append(ag)

    return render(request, 'list-assets.html', {
        'assets': assets,
        'asset_groups': asset_groups
    })
def with_effective_valid_between(self):
    """
    There are five ways in which measures can get end dated:

    1. Where the measure is given an explicit end date on the measure record
       itself
    2. Where the measure's generating regulation is a base regulation, and
       the base regulation itself is end-dated
    3. Where the measure's generating regulation is a modification
       regulation, and the modification regulation itself is end-dated
    4. Where the measure's generating regulation is a base regulation, and
       any of the modification regulations that modify it are end-dated
    5. Where the measure's generating regulation is a modification
       regulation, and the base regulation that it modifies is end-dated

    Numbers 2–5 also have to take account of the "effective end date", which
    if set should be used over any explicit date. The effective end date is
    set when other types of regulations are used (abrogation, prorogation,
    etc).
    """
    # Computing the end date for case 4 is expensive because it involves
    # aggregating over all of the modifications to the base regulation,
    # where there is one. So we pull this out into a CTE to let Postgres
    # know that none of this calculation depends on the queryset filters.
    #
    # We also turn NULLs into "infinity" such that they sort to the top:
    # i.e. if any modification regulation is open-ended, so is the measure.
    # We then turn infinity back into NULL to be used in the date range.
    Regulation = self.model._meta.get_field(
        "generating_regulation",
    ).remote_field.model

    end_date_from_modifications = With(
        Regulation.objects.annotate(
            amended_end_date=NullIf(
                Max(
                    Coalesce(
                        F("amendments__enacting_regulation__effective_end_date"),
                        EndDate("amendments__enacting_regulation__valid_between"),
                        Cast(Value("infinity"), DateField()),
                    ),
                ),
                Cast(Value("infinity"), DateField()),
            ),
        ),
        "end_date_from_modifications",
    )

    return (
        end_date_from_modifications.join(
            self,
            generating_regulation_id=end_date_from_modifications.col.id,
        )
        .with_cte(end_date_from_modifications)
        .annotate(
            db_effective_end_date=Coalesce(
                # Case 1 – explicit end date, which is always used if present
                EndDate("valid_between"),
                # Cases 2 and 3 – end date of the generating regulation
                F("generating_regulation__effective_end_date"),
                EndDate("generating_regulation__valid_between"),
                # Case 4 – generating regulation is a base regulation, and
                # a modification regulation is end-dated
                end_date_from_modifications.col.amended_end_date,
                # Case 5 – generating regulation is a modification
                # regulation, and the base it modifies is end-dated. Note
                # that the above means this only applies if the modification
                # has no end date.
                F("generating_regulation__amends__effective_end_date"),
                EndDate("generating_regulation__amends__valid_between"),
            ),
            db_effective_valid_between=Func(
                StartDate("valid_between"),
                F("db_effective_end_date"),
                Value("[]"),
                function="DATERANGE",
                output_field=TaricDateRangeField(),
            ),
        )
    )
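# Usage sketch (model name and lookup assumed, not from the source): with
# the annotation in place, the effective date range becomes queryable like
# any other field, e.g. to find measures effective today, assuming
# TaricDateRangeField supports the standard range `contains` lookup.
import datetime

live_measures = Measure.objects.with_effective_valid_between().filter(
    db_effective_valid_between__contains=datetime.date.today(),
)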
def detail_asset_view(request, asset_id):
    asset = get_object_or_404(Asset, id=asset_id)
    findings = Finding.objects.filter(asset=asset).annotate(
        severity_numm=Case(
            When(severity="critical", then=Value("0")),
            When(severity="high", then=Value("1")),
            When(severity="medium", then=Value("2")),
            When(severity="low", then=Value("3")),
            When(severity="info", then=Value("4")),
            default=Value("1"),
            output_field=CharField())).annotate(
                scope_list=ArrayAgg('scopes__name')).order_by(
                    'severity_numm', 'type', 'updated_at').only(
                        "severity", "status", "engine_type", "risk_info",
                        "vuln_refs", "title", "id", "solution", "updated_at",
                        "type")

    findings_stats = {
        'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0,
        'info': 0, 'new': 0, 'ack': 0, 'cvss_gte_7': 0
    }
    engines_stats = {}
    references = {}

    engine_scopes = {}
    for engine_scope in EnginePolicyScope.objects.all():
        engine_scopes.update({
            engine_scope.name: {
                'priority': engine_scope.priority,
                'id': engine_scope.id,
                'total': 0, 'critical': 0, 'high': 0, 'medium': 0,
                'low': 0, 'info': 0
            }
        })

    for finding in findings:
        findings_stats['total'] = findings_stats.get('total', 0) + 1
        findings_stats[finding.severity] = findings_stats.get(
            finding.severity, 0) + 1
        if finding.status == 'new':
            findings_stats['new'] = findings_stats.get('new', 0) + 1
        if finding.status == 'ack':
            findings_stats['ack'] = findings_stats.get('ack', 0) + 1
        for fs in finding.scope_list:
            if fs is not None:
                c = engine_scopes[fs]
                engine_scopes[fs].update({
                    'total': c['total'] + 1,
                    finding.severity: c[finding.severity] + 1
                })
        if finding.engine_type not in engines_stats.keys():
            engines_stats.update({finding.engine_type: 0})
        engines_stats[finding.engine_type] = engines_stats.get(
            finding.engine_type, 0) + 1
        # CVSS scores of 7.0 and above count towards `cvss_gte_7`.
        if finding.risk_info["cvss_base_score"] >= 7.0:
            findings_stats['cvss_gte_7'] = findings_stats.get(
                'cvss_gte_7', 0) + 1
        if bool(finding.vuln_refs):
            for ref in finding.vuln_refs.keys():
                if ref not in references.keys():
                    references.update({ref: []})
                tref = references[ref]
                if type(finding.vuln_refs[ref]) is list:
                    tref = tref + finding.vuln_refs[ref]
                else:
                    tref.append(finding.vuln_refs[ref])
                # references.update({ref: list(set(tref))})
                references.update({ref: tref})

    # Show only unique references
    references_cleaned = {}
    for ref in references:
        references_cleaned.update({ref: sorted(list(set(references[ref])))})

    # Related scans
    scans_stats = {
        'performed': Scan.objects.filter(assets__in=[asset]).count(),
        'defined': ScanDefinition.objects.filter(
            assets_list__in=[asset]).count(),
        'periodic': ScanDefinition.objects.filter(
            assets_list__in=[asset], scan_type='periodic').count(),
        'ondemand': ScanDefinition.objects.filter(
            assets_list__in=[asset], scan_type='single').count(),
        'running': Scan.objects.filter(
            assets__in=[asset],
            status='started').count(),  # bug: should be grouped by asset
        'lasts': Scan.objects.filter(
            assets__in=[asset]).order_by('-created_at')[:3]
    }

    asset_groups = list(
        AssetGroup.objects.filter(assets__in=[asset]).only("id"))
    scan_defs = ScanDefinition.objects.filter(
        Q(assets_list__in=[asset])
        | Q(assetgroups_list__in=asset_groups)).annotate(
            engine_type_name=F('engine_type__name')).annotate(
                scan_set_count=Count('scan'))
    scans = Scan.objects.filter(assets__in=[asset]).values(
        "id", "title", "status", "summary",
        "updated_at").annotate(engine_type_name=F('engine_type__name'))

    # Investigation links
    investigation_links = []
    DEFAULT_LINKS = copy.deepcopy(ASSET_INVESTIGATION_LINKS)
    for i in DEFAULT_LINKS:
        if asset.type in i["datatypes"]:
            if "link" in [*i]:
                i["link"] = i["link"].replace("%asset%", asset.value)
            investigation_links.append(i)

    # Automatically recalculate the risk grade
    asset.calc_risk_grade()
    asset_risk_grade = {
        'now': asset.get_risk_grade(),
        'day_ago': asset.get_risk_grade(history=1),
        'week_ago': asset.get_risk_grade(history=7),
        'month_ago': asset.get_risk_grade(history=30),
        'year_ago': asset.get_risk_grade(history=365)
    }

    return render(
        request, 'details-asset.html', {
            'asset': asset,
            'asset_risk_grade': asset_risk_grade,
            'findings': findings,
            'findings_stats': findings_stats,
            'references': references_cleaned,
            'scans_stats': scans_stats,
            'scans': scans,
            'scan_defs': scan_defs,
            'investigation_links': investigation_links,
            'engines_stats': engines_stats,
            'asset_scopes': list(engine_scopes.items())
        })
def duty_sentence(
    self,
    component_parent: Union["measures.Measure", "measures.MeasureCondition"],
):
    """
    Returns the human-readable "duty sentence" for a Measure or
    MeasureCondition instance as a string. The returned string value is a
    (PostgreSQL) string aggregation of all the measure's "current"
    components.

    This operation relies on the `prefix` and `abbreviation` fields being
    filled in on duty expressions and units, which are not supplied by the
    TARIC3 XML by default.

    Strings output by this aggregation should be valid input to the
    :class:`~measures.parsers.DutySentenceParser`.

    The string aggregation will be generated using the below SQL:

    .. code:: SQL

        STRING_AGG(
          TRIM(
            CONCAT(
              CASE
                WHEN (
                  "measures_dutyexpression"."prefix" IS NULL
                  OR "measures_dutyexpression"."prefix" = ''
                ) THEN ''
                ELSE CONCAT("measures_dutyexpression"."prefix", ' ')
              END,
              CONCAT(
                "measures_measureconditioncomponent"."duty_amount",
                CONCAT(
                  CASE
                    WHEN (
                      "measures_measureconditioncomponent"."duty_amount" IS NOT NULL
                      AND "measures_measureconditioncomponent"."monetary_unit_id" IS NULL
                    ) THEN '%'
                    WHEN "measures_measureconditioncomponent"."duty_amount" IS NULL
                      THEN ''
                    ELSE CONCAT(' ', "measures_monetaryunit"."code")
                  END,
                  CONCAT(
                    CASE
                      WHEN "measures_measurementunit"."abbreviation" IS NULL
                        THEN ''
                      WHEN "measures_measureconditioncomponent"."monetary_unit_id" IS NULL
                        THEN "measures_measurementunit"."abbreviation"
                      ELSE CONCAT(' / ', "measures_measurementunit"."abbreviation")
                    END,
                    CASE
                      WHEN "measures_measurementunitqualifier"."abbreviation" IS NULL
                        THEN ''
                      ELSE CONCAT(
                        ' / ',
                        "measures_measurementunitqualifier"."abbreviation"
                      )
                    END
                  )
                )
              )
            )
          ),
          ' '
        ) AS "duty_sentence"
    """
    # Components with the greatest transaction_id that is less than or equal
    # to component_parent's transaction_id are considered 'current'.
    component_qs = component_parent.components.approved_up_to_transaction(
        component_parent.transaction,
    )
    if not component_qs:
        return ""
    latest_transaction_id = component_qs.aggregate(
        latest_transaction_id=Max(
            "transaction_id",
        ),
    ).get("latest_transaction_id")
    component_qs = component_qs.filter(
        transaction_id=latest_transaction_id)

    # Aggregate all the current Components for component_parent to form its
    # duty sentence.
    duty_sentence = component_qs.aggregate(
        duty_sentence=StringAgg(
            expression=Trim(
                Concat(
                    Case(
                        When(
                            Q(duty_expression__prefix__isnull=True)
                            | Q(duty_expression__prefix=""),
                            then=Value(""),
                        ),
                        default=Concat(
                            F("duty_expression__prefix"),
                            Value(" "),
                        ),
                    ),
                    "duty_amount",
                    Case(
                        When(
                            monetary_unit=None,
                            duty_amount__isnull=False,
                            then=Value("%"),
                        ),
                        When(
                            duty_amount__isnull=True,
                            then=Value(""),
                        ),
                        default=Concat(
                            Value(" "),
                            F("monetary_unit__code"),
                        ),
                    ),
                    Case(
                        When(
                            Q(component_measurement=None)
                            | Q(component_measurement__measurement_unit=None)
                            | Q(component_measurement__measurement_unit__abbreviation=None),
                            then=Value(""),
                        ),
                        When(
                            monetary_unit__isnull=True,
                            then=F("component_measurement__measurement_unit__abbreviation"),
                        ),
                        default=Concat(
                            Value(" / "),
                            F("component_measurement__measurement_unit__abbreviation"),
                        ),
                    ),
                    Case(
                        When(
                            component_measurement__measurement_unit_qualifier__abbreviation=None,
                            then=Value(""),
                        ),
                        default=Concat(
                            Value(" / "),
                            F("component_measurement__measurement_unit_qualifier__abbreviation"),
                        ),
                    ),
                    output_field=CharField(),
                ),
            ),
            delimiter=" ",
            ordering="duty_expression__sid",
        ),
    )
    return duty_sentence.get("duty_sentence", "")
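# Illustrative usage (assuming this method lives on the model's custom
# queryset/manager; the example strings are invented but follow the format
# the aggregation above builds, e.g. "0.0%", "12.9 EUR / 100 kg",
# "21.0 EUR / 100 kg / lactic."):
sentence = Measure.objects.duty_sentence(measure)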
def annotate_is_published(self):
    return self.annotate(is_published=Case(
        When(publish_date__isnull=False, then=Value(True)),
        default=Value(False),
        output_field=models.BooleanField(),
    ))
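# Usage sketch (model and manager wiring assumed, not from the source): with
# this method on a custom QuerySet, published items can be selected through
# the annotation.
published = Article.objects.annotate_is_published().filter(is_published=True)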