def exclude_records_by_age_for_column(exclude_config, column):
    return Case(When(~Q(**exclude_config), then=F(column)),
                default=0,
                output_field=IntegerField())
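# A minimal usage sketch (not from the source): `Visit` and its fields are
# hypothetical. The helper yields the column value for rows NOT matched by the
# exclusion config and 0 otherwise, so it composes naturally with Sum().
from django.db.models import Sum

counted = Visit.objects.annotate(
    counted_duration=exclude_records_by_age_for_column({'age__gt': 65}, 'duration')
).aggregate(total=Sum('counted_duration'))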
def get_last_two_cached_transactions(self, only_appended, force_append, eon_number=None):
    is_sender = Q(
        wallet=self.wallet,
        sender_merkle_hash_cache__isnull=False,
        sender_merkle_height_cache__isnull=False)
    is_active_recipient = Q(
        recipient=self.wallet,
        recipient_active_state__isnull=False,
        passive=False,
        recipient_merkle_hash_cache__isnull=False,
        recipient_merkle_height_cache__isnull=False)
    is_in_tx_set = is_sender | is_active_recipient

    transfers = Transfer.objects \
        .filter(
            is_in_tx_set,
            eon_number=eon_number or self.transfer.eon_number,
            voided=False
        ).annotate(
            # avoid expensive joins by using the cached index
            # index=Case(
            #     When(is_sender, then=F('sender_active_state__tx_set_index')),
            #     When(is_active_recipient, then=F('recipient_active_state__tx_set_index'))
            # ),
            # add an index field to eliminate field heterogeneity
            index=Case(
                When(is_sender, then=F('sender_merkle_index')),
                When(is_active_recipient, then=F('recipient_merkle_index'))
            ),
            # add a height field to eliminate field heterogeneity
            height_cache=Case(
                When(is_sender, then=F('sender_merkle_height_cache')),
                When(is_active_recipient, then=F('recipient_merkle_height_cache'))
            ),
            # add a hash field to eliminate field heterogeneity
            hash_cache=Case(
                When(is_sender, then=F('sender_merkle_hash_cache')),
                When(is_active_recipient, then=F('recipient_merkle_hash_cache'))
            ),
        )

    if only_appended:
        # filter() returns a new queryset, so the result must be reassigned
        transfers = transfers.filter(appended=True)

    if not force_append:
        transfers = self.filter_transfers_by_transfer_set_index(transfers)

    # order by the new index field in descending order,
    # since the last 2 transactions are those of interest
    transfers = transfers.order_by('-index')[:2]

    if transfers.count() == 2:
        return transfers[0], transfers[1]
    if transfers.count() == 1:
        return transfers[0], None
    return None, None
def search_recipes(request, queryset, params):
    if request.user.is_authenticated:
        search_prefs = request.user.searchpreference
    else:
        search_prefs = SearchPreference()

    search_string = params.get('query', '').strip()
    search_rating = int(params.get('rating', 0))
    search_keywords = params.getlist('keywords', [])
    search_foods = params.getlist('foods', [])
    search_books = params.getlist('books', [])
    search_steps = params.getlist('steps', [])
    search_units = params.get('units', None)

    # TODO I think the default behavior should be 'AND', which is how most sites operate with facets/filters based on results
    search_keywords_or = str2bool(params.get('keywords_or', True))
    search_foods_or = str2bool(params.get('foods_or', True))
    search_books_or = str2bool(params.get('books_or', True))

    search_internal = str2bool(params.get('internal', False))
    search_random = str2bool(params.get('random', False))
    search_new = str2bool(params.get('new', False))
    search_last_viewed = int(params.get('last_viewed', 0))
    orderby = []

    # only sort by recent if not otherwise filtering/sorting
    if search_last_viewed > 0:
        last_viewed_recipes = ViewLog.objects.filter(
            created_by=request.user, space=request.space,
            created_at__gte=timezone.now() - timedelta(days=14)  # TODO make recent days a setting
        ).order_by('-pk').values_list('recipe__pk', flat=True)
        last_viewed_recipes = list(dict.fromkeys(last_viewed_recipes))[:search_last_viewed]  # removes duplicates from the list prior to slicing

        # return queryset.annotate(last_view=Max('viewlog__pk')).annotate(new=Case(When(pk__in=last_viewed_recipes, then=('last_view')), default=Value(0))).filter(new__gt=0).order_by('-new')
        # queryset that only annotates the most recent view (higher pk = latest view)
        queryset = queryset.annotate(recent=Coalesce(Max('viewlog__pk'), Value(0)))
        orderby += ['-recent']

    # TODO create setting for default ordering - most cooked, rating,
    # TODO create options for live sorting
    # TODO make days of new recipe a setting
    if search_new:
        queryset = (
            queryset.annotate(new_recipe=Case(
                When(created_at__gte=(timezone.now() - timedelta(days=7)), then=('pk')),
                default=Value(0),
            ))
        )
        # only sort by new recipes if not otherwise filtering/sorting
        orderby += ['-new_recipe']

    search_type = search_prefs.search or 'plain'
    if len(search_string) > 0:
        unaccent_include = search_prefs.unaccent.values_list('field', flat=True)

        icontains_include = [x + '__unaccent' if x in unaccent_include else x
                             for x in search_prefs.icontains.values_list('field', flat=True)]
        istartswith_include = [x + '__unaccent' if x in unaccent_include else x
                               for x in search_prefs.istartswith.values_list('field', flat=True)]
        trigram_include = [x + '__unaccent' if x in unaccent_include else x
                           for x in search_prefs.trigram.values_list('field', flat=True)]
        fulltext_include = search_prefs.fulltext.values_list('field', flat=True)  # fulltext doesn't use the field name directly

        # if no filters are configured, use name__icontains as the default
        if len(icontains_include) + len(istartswith_include) + len(trigram_include) + len(fulltext_include) == 0:
            filters = [Q(**{"name__icontains": search_string})]
        else:
            filters = []

        # dynamically build the array of filters that will be applied
        for f in icontains_include:
            filters += [Q(**{"%s__icontains" % f: search_string})]

        for f in istartswith_include:
            filters += [Q(**{"%s__istartswith" % f: search_string})]

        if settings.DATABASES['default']['ENGINE'] in ['django.db.backends.postgresql_psycopg2', 'django.db.backends.postgresql']:
            language = DICTIONARY.get(translation.get_language(), 'simple')

            # django full text search
            # https://docs.djangoproject.com/en/3.2/ref/contrib/postgres/search/#searchquery
            # TODO can optionally installing this extension further enhance the search query language? https://github.com/caub/pg-tsquery
            # trigram breaks the full text search 'websearch' and 'raw' capabilities and will be ignored if those methods are chosen
            if search_type in ['websearch', 'raw']:
                search_trigram = False
            else:
                search_trigram = True
            search_query = SearchQuery(
                search_string,
                search_type=search_type,
                config=language,
            )

            # iterate through the fields to use in trigrams, generating a single trigram
            if search_trigram and len(trigram_include) > 0:
                trigram = None
                for f in trigram_include:
                    if trigram:
                        trigram += TrigramSimilarity(f, search_string)
                    else:
                        trigram = TrigramSimilarity(f, search_string)
                queryset = queryset.annotate(similarity=trigram)
                filters += [Q(similarity__gt=search_prefs.trigram_threshold)]

            if 'name' in fulltext_include:
                filters += [Q(name_search_vector=search_query)]
            if 'description' in fulltext_include:
                filters += [Q(desc_search_vector=search_query)]
            if 'instructions' in fulltext_include:
                filters += [Q(steps__search_vector=search_query)]
            if 'keywords' in fulltext_include:
                filters += [Q(keywords__in=Subquery(Keyword.objects.filter(name__search=search_query).values_list('id', flat=True)))]
            if 'foods' in fulltext_include:
                filters += [Q(steps__ingredients__food__in=Subquery(Food.objects.filter(name__search=search_query).values_list('id', flat=True)))]

            query_filter = None
            for f in filters:
                if query_filter:
                    query_filter |= f
                else:
                    query_filter = f

            # TODO add order by user settings - only do search rank and annotation if rank order is configured
            search_rank = (
                SearchRank('name_search_vector', search_query, cover_density=True)
                + SearchRank('desc_search_vector', search_query, cover_density=True)
                + SearchRank('steps__search_vector', search_query, cover_density=True)
            )
            queryset = queryset.filter(query_filter).annotate(rank=search_rank)
            orderby += ['-rank']
        else:
            queryset = queryset.filter(name__icontains=search_string)

    if len(search_keywords) > 0:
        if search_keywords_or:
            # TODO make including descendants of keywords a setting
            # for kw in Keyword.objects.filter(pk__in=search_keywords):
            #     search_keywords += list(kw.get_descendants().values_list('pk', flat=True))
            queryset = queryset.filter(keywords__id__in=search_keywords)
        else:
            # when performing an 'and' search, returned recipes should include a parent OR any of its descendants
            # AND the other keywords selected, so filters are appended using keywords__id__in the list of keywords and descendants
            for kw in Keyword.objects.filter(pk__in=search_keywords):
                queryset = queryset.filter(keywords__id__in=list(kw.get_descendants_and_self().values_list('pk', flat=True)))

    if len(search_foods) > 0:
        if search_foods_or:
            # TODO make including descendants of foods a setting
            queryset = queryset.filter(steps__ingredients__food__id__in=search_foods)
        else:
            # when performing an 'and' search, returned recipes should include a parent OR any of its descendants
            # AND the other foods selected, so filters are appended using steps__ingredients__food__id__in the list of foods and descendants
            for fd in Food.objects.filter(pk__in=search_foods):
                queryset = queryset.filter(steps__ingredients__food__id__in=list(fd.get_descendants_and_self().values_list('pk', flat=True)))

    if len(search_books) > 0:
        if search_books_or:
            queryset = queryset.filter(recipebookentry__book__id__in=search_books)
        else:
            for k in search_books:
                queryset = queryset.filter(recipebookentry__book__id=k)

    if search_rating:
        queryset = queryset.annotate(rating=Round(Avg(Case(When(cooklog__created_by=request.user, then='cooklog__rating'), default=Value(0)))))
        if search_rating == -1:
            queryset = queryset.filter(rating=0)
        else:
            queryset = queryset.filter(rating__gte=search_rating)

    # probably only useful in the Unit list view, so keeping it simple
    if search_units:
        queryset = queryset.filter(steps__ingredients__unit__id=search_units)

    # probably only useful in the Steps list view, so keeping it simple
    if search_steps:
        queryset = queryset.filter(steps__id__in=search_steps)

    if search_internal:
        queryset = queryset.filter(internal=True)

    queryset = queryset.distinct()

    if search_random:
        queryset = queryset.order_by("?")
    else:
        queryset = queryset.order_by(*orderby)
    return queryset
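# Hedged usage sketch (not part of the source; `Recipe` and the template path
# are assumptions): a view would typically pass the request, a base queryset
# scoped to the current space, and the raw GET params.
def recipe_search_view(request):
    queryset = Recipe.objects.filter(space=request.space)
    recipes = search_recipes(request, queryset, request.GET)
    return render(request, 'recipes/search.html', {'recipes': recipes})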
def get_products_data(
    queryset: "QuerySet",
    export_fields: Set[str],
    attribute_ids: Optional[List[int]],
    warehouse_ids: Optional[List[int]],
    channel_ids: Optional[List[int]],
) -> List[Dict[str, Union[str, bool]]]:
    """Create a data list of products and their variants with field values.

    It returns a list of product and variant data that can be used as input
    to a csv writer, along with lists of attribute and warehouse headers.
    """
    products_with_variants_data = []

    product_fields = set(
        ProductExportFields.HEADERS_TO_FIELDS_MAPPING["fields"].values()
    )
    product_export_fields = export_fields & product_fields
    product_export_fields.add("variants__id")

    products_data = (
        queryset.annotate(
            product_weight=Case(
                When(weight__isnull=False, then=Concat("weight", V(" g"))),
                default=V(""),
                output_field=CharField(),
            ),
            variant_weight=Case(
                When(
                    variants__weight__isnull=False,
                    then=Concat("variants__weight", V(" g")),
                ),
                default=V(""),
                output_field=CharField(),
            ),
            description_as_str=Cast("description", CharField()),
        )
        .order_by("pk", "variants__pk")
        .values(*product_export_fields)
        .distinct("pk", "variants__pk")
    )

    products_relations_data = get_products_relations_data(
        queryset, export_fields, attribute_ids, channel_ids
    )
    variants_relations_data = get_variants_relations_data(
        queryset, export_fields, attribute_ids, warehouse_ids, channel_ids
    )

    for product_data in products_data:
        pk = product_data["id"]
        variant_pk = product_data.pop("variants__id")

        product_relations_data: Dict[str, str] = products_relations_data.get(pk, {})
        variant_relations_data: Dict[str, str] = variants_relations_data.get(
            variant_pk, {}
        )

        data = {**product_data, **product_relations_data, **variant_relations_data}
        products_with_variants_data.append(data)

    return products_with_variants_data
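# Illustrative call (an assumption, not from the source): export the id, name
# and variant SKU columns for all products, with no attribute, warehouse or
# channel restrictions.
export_rows = get_products_data(
    Product.objects.all(),
    export_fields={"id", "name", "variants__sku"},
    attribute_ids=None,
    warehouse_ids=None,
    channel_ids=None,
)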
def fix_suggestion(request, suggestion_id):
    cluster_colors = {
        'unprocessed': 'text-info',
        'accepted': 'text-success',
        'rejected': 'text-danger'
    }

    # Retrieve the Suggestion object if it exists, else raise a 404 error
    suggestion = get_object_or_404(
        Suggestion.objects.select_related('work', 'user', 'work__category').annotate(
            count_agrees=Count(Case(When(evidence__agrees=True, then=1))),
            count_disagrees=Count(Case(When(evidence__agrees=False, then=1)))),
        id=suggestion_id)

    # Retrieve the Evidence object if it exists
    evidence = None
    if request.user.is_authenticated:
        try:
            evidence = Evidence.objects.get(user=request.user, suggestion=suggestion)
        except ObjectDoesNotExist:
            evidence = None

    # Retrieve related clusters
    clusters = WorkCluster.objects.select_related(
        'resulting_work', 'resulting_work__category').prefetch_related(
        'works', 'works__category', 'checker').filter(origin=suggestion_id).all()
    colors = [cluster_colors[cluster.status] for cluster in clusters]

    # Get the previous suggestion, i.e. more recent and of the same checked status
    previous_suggestions_ids = Suggestion.objects.filter(
        date__gt=suggestion.date, is_checked=suggestion.is_checked).order_by('date').values_list(
        'id', flat=True)
    # If there is no more recent suggestion, and it was checked, just pick from unchecked suggestions
    if not previous_suggestions_ids and suggestion.is_checked:
        previous_suggestions_ids = Suggestion.objects.filter(
            is_checked=False).order_by('date').values_list('id', flat=True)

    # Get the next suggestion, i.e. less recent and of the same checked status
    next_suggestions_ids = Suggestion.objects.filter(
        date__lt=suggestion.date, is_checked=suggestion.is_checked).order_by('-date').values_list(
        'id', flat=True)
    # If there is no less recent suggestion, and it wasn't checked, just pick from checked suggestions
    if not next_suggestions_ids and not suggestion.is_checked:
        next_suggestions_ids = Suggestion.objects.filter(
            is_checked=True).order_by('-date').values_list('id', flat=True)

    context = {
        'suggestion': suggestion,
        'clusters': zip(clusters, colors) if clusters and colors else None,
        'evidence': evidence,
        'can_auto_fix': suggestion.can_auto_fix and request.user.is_staff,
        'can_close': request.user.is_staff,
        'can_reopen': request.user.is_staff,
        'next_id': next_suggestions_ids[0] if next_suggestions_ids else None,
        'previous_id': previous_suggestions_ids[0] if previous_suggestions_ids else None
    }
    return render(request, 'fix/fix_suggestion.html', context)
def list_transactions(request):
    if request.user.is_anonymous:
        return redirect("login")

    print("Getting transactions")
    variable, monthly, startdate, enddate, ignore, category_ignore, n = GuiaBolsoViews.get_parameters(request)

    transactions = GuiaBolsoTransaction.objects.filter(user=request.user).order_by('-date').select_related('category')
    startdate, enddate = GuiaBolsoViews.decide_date_limits(transactions, n, startdate, enddate, monthly)

    if variable:
        transactions = transactions.filter(Q(category__predictable=False) & Q(exclude_from_variable=False))
        expense_categories = GuiaBolsoCategory.objects.annotate(month_sum=Sum('category_transactions__value')).filter(month_sum__lte=0).values('id')
        transactions = transactions.filter(category__in=expense_categories)

    transactions = transactions.filter(date__gte=startdate)
    if enddate is not None:
        transactions = transactions.filter(date__lte=enddate)

    # Annotate transactions with only the per-transaction ignore flag,
    # since we don't want to filter out ignored categories yet
    transactions = transactions.annotate(is_ignored=Case(
        When(code__in=ignore, then=True),
        default=False,
        output_field=BooleanField()
    ))
    not_ignored_transactions = transactions.filter(is_ignored=False)

    last_date = enddate
    if len(transactions) > 0:
        last_date = transactions.order_by('-date')[0].date

    # Generate the list of categories based on the not-ignored transactions
    categories = GuiaBolsoViews.group_by_category(not_ignored_transactions, last_date)
    categories = categories.annotate(is_ignored=Case(
        When(category__code__in=category_ignore, then=True),
        default=False,
        output_field=BooleanField()
    ))

    # Annotate the transactions including the category ignores, now that the categories are created
    transactions = transactions.annotate(is_ignored=Case(
        When(Q(code__in=ignore) | Q(category__code__in=category_ignore), then=True),
        default=False,
        output_field=BooleanField()
    ))
    # Update the not_ignored list for getting the total sum
    not_ignored_transactions = transactions.filter(is_ignored=False)
    total = not_ignored_transactions.aggregate(value=Sum('value'))['value']
    total_planned = categories.filter(is_ignored=False).aggregate(value=Sum('budget'))['value']

    try:
        token = GuiaBolsoToken.objects.get(user=request.user)
    except GuiaBolsoToken.DoesNotExist:
        return redirect('add_token')

    transactions = transactions.annotate(_date=TruncDate('date'))
    # if len(transactions) > 100:
    #     transactions = transactions[:100]
    return render(request, 'GuiaBolso/list_transactions.html', {
        'transactions': transactions,
        'grouped_transactions': {k: list(v) for k, v in groupby(transactions, attrgetter('_date'))},
        'categories': categories,
        'last_updated': token.last_updated,
        'variable': variable,
        'monthly': monthly,
        'startdate': startdate if startdate is not None else datetime.date.today(),
        'enddate': enddate if enddate is not None else datetime.date.today(),
        'total': total,
        'total_planned': total_planned,
    })
def __init__(self, *args, **kwargs):
    self.include_args = kwargs.pop('include', {})
    self._dataset = kwargs.pop('dataset', None)
    instance = kwargs.get('instance', None)
    user = kwargs.pop('user', None)
    super().__init__(*args, **kwargs)

    # Build the FTP file choices from the user's upload directory
    # (renamed from `dir` to avoid shadowing the builtin)
    ftp_dir = os.path.join(FTP_DIR, user.username, FTP_UPLOADS_DIR)
    choices = [(None, 'Veuillez sélectionner un fichier')]  # "Please select a file"
    for path, subdirs, files in os.walk(ftp_dir):
        for name in files:
            filename = os.path.join(path, name)
            choices.append((filename, filename[len(ftp_dir) + 1:]))
    self.fields['ftp_file'].choices = choices

    if user.profile.is_admin:
        choices = self.Meta.model.EXTRA_FREQUENCY_CHOICES + self.Meta.model.FREQUENCY_CHOICES
        self.fields['sync_frequency_ftp'].choices = choices
        self.fields['sync_frequency_dl'].choices = choices

    if instance:
        # Show the already-allowed profiles and organisations first
        related_profiles = Case(
            When(pk__in=[m.pk for m in instance.profiles_allowed.all()], then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        )
        self.fields['profiles_allowed'].queryset = \
            Profile.objects.annotate(related=related_profiles).order_by('-related', 'user__username')

        related_organisations = Case(
            When(pk__in=[
                m.pk for m in instance.organisations_allowed.all()
            ], then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        )
        self.fields['organisations_allowed'].queryset = \
            Organisation.objects.annotate(related=related_organisations).order_by('-related', 'slug')

        if instance.up_file:
            self.fields['up_file'].widget.attrs['value'] = instance.up_file
        elif instance.ftp_file:
            self.fields['synchronisation_ftp'].initial = instance.synchronisation
            self.fields['sync_frequency_ftp'].initial = instance.sync_frequency
            try:
                instance.ftp_file.file
            except FileNotFoundError:
                # Label: "File initially uploaded to your FTP account
                # (this file is no longer detected)"
                self.fields['ftp_file'] = forms.CharField(
                    label="Fichier initialement déposé sur votre compte FTP (ce fichier n'est plus détecté) :",
                    required=False,
                    widget=forms.TextInput(attrs={
                        'class': 'disabled',
                        'disabled': True,
                    }),
                )
        elif instance.dl_url:
            self.fields['synchronisation_dl'].initial = instance.synchronisation
            self.fields['sync_frequency_dl'].initial = instance.sync_frequency
def homepage_dashboard_view(request):
    teamid_selected = -1
    if settings.PRO_EDITION is True and int(request.GET.get('team', -1)) >= 0:
        teamid = int(request.GET.get('team'))
        # @Todo: ensure the team is allowed for this user
        teamid_selected = teamid

    # Findings
    if teamid_selected >= 0:
        findings = Finding.objects.for_team(request.user, teamid_selected).all().only("status", "severity")
        assets = Asset.objects.for_team(request.user, teamid_selected).all()
        assetgroups = AssetGroup.objects.for_team(request.user, teamid_selected).all()
        scan_definitions = ScanDefinition.objects.for_team(request.user, teamid_selected).all()
        scans = Scan.objects.for_team(request.user, teamid_selected).all()
        alerts_new = Alert.objects.for_team(request.user, teamid_selected).filter(status="new")
    else:
        findings = Finding.objects.for_user(request.user).all().only("status", "severity")
        assets = Asset.objects.for_user(request.user).all()
        assetgroups = AssetGroup.objects.for_user(request.user).all()
        scan_definitions = ScanDefinition.objects.for_user(request.user).all()
        scans = Scan.objects.for_user(request.user).all()
        alerts_new = Alert.objects.for_user(request.user).filter(status="new")

    global_stats = {
        "assets": {
            "total": assets.count(),
            "total_ag": assetgroups.count(),
        },
        "asset_types": {},
        "findings": {},
        "scans": {
            # "defined": ScanDefinition.objects.for_user(request.user).all().count(),
            "defined": scan_definitions.count(),
            # "performed": Scan.objects.for_user(request.user).all().count(),
            "running": scans.filter(status="started").count(),
            "enqueued": scans.filter(status="enqueued").count(),
            "performed": scans.count(),
            # "active_periodic": ScanDefinition.objects.for_user(request.user).filter(enabled=True, scan_type='periodic').count(),
            "active_periodic": scan_definitions.filter(enabled=True, scan_type='periodic').count(),
        },
        "engines": {
            "total": EngineInstance.objects.all().count(),
            "policies": EnginePolicy.objects.all().count(),
            "active": EngineInstance.objects.filter(status='READY', enabled=True).count(),
        },
        "rules": {
            "total": Rule.objects.all().count(),
            "active": Rule.objects.filter(enabled=True).count(),
            "nb_matches": 0,
        },
        "alerts": {
            "total_new": alerts_new.count(),
            "new_info": alerts_new.filter(severity="info").count(),
            "new_low": alerts_new.filter(severity="low").count(),
            "new_medium": alerts_new.filter(severity="medium").count(),
            "new_high": alerts_new.filter(severity="high").count(),
            "new_critical": alerts_new.filter(severity="critical").count(),
        },
    }

    # asset types
    asset_types_stats_params = {}
    for at in ASSET_TYPES:
        asset_types_stats_params.update({
            at[0]: Coalesce(
                Sum(Case(When(type=at[0], then=1)), output_field=models.IntegerField()), 0)
        })
    global_stats["asset_types"] = assets.aggregate(**asset_types_stats_params)

    # finding counters
    findings_stats = findings.aggregate(
        nb_new=Coalesce(
            Sum(Case(When(status='new', then=1)), output_field=models.IntegerField()), 0),
        nb_critical=Coalesce(
            Sum(Case(When(severity='critical', then=1)), output_field=models.IntegerField()), 0),
        nb_high=Coalesce(
            Sum(Case(When(severity='high', then=1)), output_field=models.IntegerField()), 0),
        nb_medium=Coalesce(
            Sum(Case(When(severity='medium', then=1)), output_field=models.IntegerField()), 0),
        nb_low=Coalesce(
            Sum(Case(When(severity='low', then=1)), output_field=models.IntegerField()), 0),
        nb_info=Coalesce(
            Sum(Case(When(severity='info', then=1)), output_field=models.IntegerField()), 0),
    )
    global_stats["findings"] = {
        # "total_raw": RawFinding.objects.count(),
        # "total": findings.count(),
        "total": findings_stats["nb_critical"] + findings_stats["nb_high"] + findings_stats["nb_medium"] + findings_stats["nb_low"] + findings_stats["nb_info"],
        "new": findings_stats["nb_new"],
        "critical": findings_stats["nb_critical"],
        "high": findings_stats["nb_high"],
        "medium": findings_stats["nb_medium"],
        "low": findings_stats["nb_low"],
        "info": findings_stats["nb_info"],
    }

    # update nb_matches
    matches = 0
    for r in Rule.objects.all():
        matches += r.nb_matches
    global_stats["rules"].update({"nb_matches": matches})

    # Last 6 findings
    # last_findings = Finding.objects.for_user(request.user).all().order_by('-id')[:6][::-1]
    last_findings = findings.order_by('-id')[:6][::-1]

    # Last 6 scans
    # last_scans = Scan.objects.for_user(request.user).all().order_by('-started_at')[:6]
    last_scans = scans.order_by('-started_at')[:6]

    # Asset grades distribution and TOP 10
    asset_grades_map = {
        "A": {"high": 0, "medium": 0, "low": 0},
        "B": {"high": 0, "medium": 0, "low": 0},
        "C": {"high": 0, "medium": 0, "low": 0},
        "D": {"high": 0, "medium": 0, "low": 0},
        "E": {"high": 0, "medium": 0, "low": 0},
        "F": {"high": 0, "medium": 0, "low": 0},
        "-": {"high": 0, "medium": 0, "low": 0}
    }
    assetgroup_grades_map = copy.deepcopy(asset_grades_map)

    # Asset grades
    assets_risk_scores = {}
    for asset in assets.only("risk_level", "criticity", "id"):
        asset_grades_map[asset.risk_level["grade"]].update({
            asset.criticity: asset_grades_map[asset.risk_level["grade"]][asset.criticity] + 1
        })
        assets_risk_scores.update({asset.id: asset.get_risk_score()})

    top_critical_assets_scores = sorted(assets_risk_scores.items(), key=operator.itemgetter(1))[::-1][:6]
    tcas_id_list = [id for id, score in top_critical_assets_scores]
    top_critical_assets = list(assets.filter(id__in=tcas_id_list))
    top_critical_assets.sort(key=lambda t: tcas_id_list.index(t.id))

    # Format to list
    asset_grades_map_list = []
    for key in sorted(asset_grades_map.keys()):
        asset_grades_map_list.append({key: asset_grades_map[key]})

    # Asset groups
    assetgroups_risk_scores = {}
    # ags = AssetGroup.objects.for_user(request.user).all().only("risk_level", "criticity", "id", "name")
    ags = assetgroups.only("risk_level", "criticity", "id", "name")
    for assetgroup in ags:
        assetgroup_grades_map[assetgroup.risk_level["grade"]].update({
            assetgroup.criticity: assetgroup_grades_map[assetgroup.risk_level["grade"]][assetgroup.criticity] + 1
        })
        assetgroups_risk_scores.update({assetgroup.id: assetgroup.get_risk_score()})

    top_critical_assetgroups_scores = sorted(assetgroups_risk_scores.items(), key=operator.itemgetter(1))[::-1][:6]
    tcags_id_list = [id for id, score in top_critical_assetgroups_scores]
    top_critical_assetgroups = list(ags.filter(id__in=tcags_id_list))
    top_critical_assetgroups.sort(key=lambda t: tcags_id_list.index(t.id))

    assetgroup_grades_map_list = []
    for key in sorted(assetgroup_grades_map.keys()):
        assetgroup_grades_map_list.append({key: assetgroup_grades_map[key]})

    # Critical findings: collect up to MAX_CF findings, by decreasing severity
    top_critical_findings = []
    MAX_CF = 6
    for finding in findings.filter(severity="critical").only("id", "severity", "title", "asset_name"):
        if len(top_critical_findings) <= MAX_CF:
            top_critical_findings.append(finding)
    if len(top_critical_findings) <= MAX_CF:
        for finding in findings.filter(severity="high").only("id", "severity", "title", "asset_name"):
            if len(top_critical_findings) <= MAX_CF:
                top_critical_findings.append(finding)
    if len(top_critical_findings) <= MAX_CF:
        for finding in findings.filter(severity="medium").only("id", "severity", "title", "asset_name"):
            if len(top_critical_findings) <= MAX_CF:
                top_critical_findings.append(finding)
    if len(top_critical_findings) <= MAX_CF:
        for finding in findings.filter(severity="low").only("id", "severity", "title", "asset_name"):
            if len(top_critical_findings) <= MAX_CF:
                top_critical_findings.append(finding)
    if len(top_critical_findings) <= MAX_CF:
        for finding in findings.filter(severity="info").only("id", "severity", "title", "asset_name"):
            if len(top_critical_findings) <= MAX_CF:
                top_critical_findings.append(finding)

    # CVSS
    cvss_scores = {'lte5': 0, '5to7': 0, 'gte7': 0, 'gte9': 0, 'eq10': 0}
    for finding in findings.prefetch_related("risk_info__cvss_base_score").only("risk_info"):
        if finding.risk_info["cvss_base_score"] < 5.0:
            cvss_scores.update({'lte5': cvss_scores['lte5'] + 1})
        if finding.risk_info["cvss_base_score"] >= 5.0 and finding.risk_info["cvss_base_score"] <= 7.0:
            cvss_scores.update({'5to7': cvss_scores['5to7'] + 1})
        if finding.risk_info["cvss_base_score"] >= 7.0:
            cvss_scores.update({'gte7': cvss_scores['gte7'] + 1})
        if finding.risk_info["cvss_base_score"] >= 9.0 and finding.risk_info["cvss_base_score"] < 10:
            cvss_scores.update({'gte9': cvss_scores['gte9'] + 1})
        if finding.risk_info["cvss_base_score"] == 10.0:
            cvss_scores.update({'eq10': cvss_scores['eq10'] + 1})

    # CVE & CWE
    cxe_stats = {}
    cve_list = {}
    cwe_list = {}
    # finding_cves_list = Finding.objects.for_user(request.user).exclude(
    finding_cves_list = findings.exclude(
        Q(vuln_refs__CVE__isnull=True) |
        Q(status__in=['mitigated', 'patched', 'closed', 'false-positive'])
    ).annotate(cvelist=KeyTextTransform("CVE", 'vuln_refs')).values('cvelist')
    # finding_cwes_list = Finding.objects.for_user(request.user).exclude(
    finding_cwes_list = findings.exclude(
        Q(vuln_refs__CWE__isnull=True) |
        Q(status__in=['mitigated', 'patched', 'closed', 'false-positive'])
    ).annotate(cwelist=KeyTextTransform("CWE", 'vuln_refs')).values('cwelist')

    for finding_cves in finding_cves_list:
        if finding_cves['cvelist'] is not None:
            for cve in ast.literal_eval(finding_cves['cvelist']):
                if cve not in cve_list.keys():
                    cve_list.update({cve: 1})
                else:
                    cve_list.update({cve: cve_list[cve] + 1})

    for cwe_data in finding_cwes_list:
        cwe = list(cwe_data.values())[0]
        if cwe not in cwe_list.keys():
            cwe_list.update({cwe: 1})
        else:
            cwe_list.update({cwe: cwe_list[cwe] + 1})

    cxe_stats.update({
        'top_cve': sorted(cve_list.items(), key=lambda x: x[1], reverse=True)[:10],
        'top_cwe': sorted(cwe_list.items(), key=lambda x: x[1], reverse=True)[:10],
    })

    teams = []
    if settings.PRO_EDITION and request.user.is_superuser:
        teams = Team.objects.all().order_by('name')
    elif settings.PRO_EDITION and not request.user.is_superuser:
        for tu in TeamUser.objects.filter(user=request.user, organization__is_active=True):
            teams.append({'id': tu.organization.id, 'name': tu.organization.name})

    return render(request, 'home-dashboard.html', {
        'global_stats': global_stats,
        'last_findings': last_findings,
        'last_scans': last_scans,
        'asset_grades_map': asset_grades_map_list,
        'assetgroup_grades_map': assetgroup_grades_map_list,
        'top_critical_assets': top_critical_assets,
        'top_critical_assetgroups': top_critical_assetgroups,
        'top_critical_findings': top_critical_findings,
        'cvss_scores': cvss_scores,
        'cxe_stats': cxe_stats,
        'teams': teams
    })
def populate_aggregated_table(apps, schema_editor):
    """Make the first character the main."""
    AggregatedGuildMemberWarStats = apps.get_model('bdo', 'AggregatedGuildMemberWarStats')
    AggregatedGuildWarStats = apps.get_model('bdo', 'AggregatedGuildWarStats')
    AggregatedUserWarStats = apps.get_model('bdo', 'AggregatedUserWarStats')
    Guild = apps.get_model('bdo', 'Guild')
    GuildMember = apps.get_model('bdo', 'GuildMember')
    Profile = apps.get_model('bdo', 'Profile')
    WarAttendance = apps.get_model('bdo', 'WarAttendance')
    WarStat = apps.get_model('bdo', 'WarStat')

    fields = [
        'command_post', 'death', 'fort', 'gate', 'guild_master', 'help',
        'id', 'member', 'mount', 'officer', 'placed_objects', 'siege_weapons'
    ]
    attendance_field_annotations = {
        "wars_attended": Count('*')
    }
    base_field_annotations = {
        field: Sum(field) for field in fields
    }
    extended_field_annotations = {
        **base_field_annotations,
        **attendance_field_annotations
    }

    missed_qs = (WarAttendance.objects.filter(user_profile_id=OuterRef('user_profile'))
                 .values('user_profile_id')
                 .order_by('user_profile_id')
                 .annotate(war_missed=Count(Case(When(is_attending=3, then=1))))
                 .values('war_missed'))
    unavailable_qs = (WarAttendance.objects.filter(user_profile_id=OuterRef('user_profile'))
                      .values('user_profile_id')
                      .order_by('user_profile_id')
                      .annotate(war_missed=Count(Case(When(is_attending=1, then=1))))
                      .values('war_missed'))

    total_kills_expr = F('guild_master') + F('officer') + F('member') + F('siege_weapons')
    kdr_expr = Case(When(death=0, then=0.0),
                    default=(total_kills_expr * 1.0 / F('death')),
                    output_field=FloatField())

    # Calculate aggregated guild stats
    guild_stats = (
        WarStat.objects.values('attendance__war__guild')
                       .annotate(**base_field_annotations)
    )
    aggregated_stats = [
        AggregatedGuildWarStats(
            guild_id=guild_stat['attendance__war__guild'],
            **{
                field: value
                for field, value in guild_stat.items()
                if field not in ['attendance__war__guild', 'id']
            }
        )
        for guild_stat in guild_stats
    ]
    AggregatedGuildWarStats.objects.bulk_create(aggregated_stats)
    # Calculate the total kills and kdr stats
    AggregatedGuildMemberWarStats.objects.update(total_kills=total_kills_expr, kdr=kdr_expr)

    # Calculate aggregated guild member stats
    guild_member_stats = (WarStat.objects.values('attendance__war__guild', 'attendance__user_profile')
                          .annotate(**extended_field_annotations))
    aggregated_stats = [
        AggregatedGuildMemberWarStats(
            guild_id=member_stat['attendance__war__guild'],
            user_profile_id=member_stat['attendance__user_profile'],
            **{
                field: value
                for field, value in member_stat.items()
                if field not in ['attendance__war__guild', 'attendance__user_profile', 'id']
            }
        )
        for member_stat in guild_member_stats
    ]
    AggregatedGuildMemberWarStats.objects.bulk_create(aggregated_stats)
    # Calculate the missed, unavailable, total kills and kdr stats
    (AggregatedGuildMemberWarStats.objects
     .annotate(missed=Subquery(missed_qs), unavailable=Subquery(unavailable_qs))
     .update(wars_missed=F('missed'),
             wars_unavailable=F('unavailable'),
             total_kills=total_kills_expr,
             kdr=kdr_expr))

    # Calculate aggregated user stats
    user_stats = (
        WarStat.objects.values('attendance__user_profile')
                       .annotate(**extended_field_annotations)
    )
    AggregatedUserWarStats.objects.bulk_create([
        AggregatedUserWarStats(
            user_profile_id=user_stat['attendance__user_profile'],
            **{
                field: value
                for field, value in user_stat.items()
                if field not in ['attendance__user_profile', 'id']
            }
        )
        for user_stat in user_stats
    ])
    # Calculate the missed, unavailable, total kills and kdr stats
    (AggregatedUserWarStats.objects
     .annotate(missed=Subquery(missed_qs), unavailable=Subquery(unavailable_qs))
     .update(wars_missed=F('missed'),
             wars_unavailable=F('unavailable'),
             total_kills=total_kills_expr,
             kdr=kdr_expr))

    # Generate stub rows
    AggregatedGuildWarStats.objects.bulk_create([
        AggregatedGuildWarStats(guild=guild)
        for guild in Guild.objects.exclude(id__in=AggregatedGuildWarStats.objects.values_list('guild'))
    ])

    member_tuples = AggregatedGuildMemberWarStats.objects.values_list('guild', 'user_profile')
    new_aggregated_members = []
    for member in GuildMember.objects.all():
        if (member.guild_id, member.user_id) in member_tuples:
            continue
        new_aggregated_members.append(AggregatedGuildMemberWarStats(guild=member.guild, user_profile=member.user))
    AggregatedGuildMemberWarStats.objects.bulk_create(new_aggregated_members)
    AggregatedGuildMemberWarStats.objects.bulk_create([
        AggregatedGuildMemberWarStats(guild=guild)
        for guild in Guild.objects.exclude(id__in=AggregatedGuildWarStats.objects.values_list('guild'))
    ])

    AggregatedUserWarStats.objects.bulk_create([
        AggregatedUserWarStats(user_profile=profile)
        for profile in Profile.objects.exclude(id__in=AggregatedUserWarStats.objects.values_list('user_profile'))
    ])
def vote(request):
    questionId = request.POST.get('questionId')
    action = request.POST.get('action')
    question_posted_by = Questions.objects.get(questionId=questionId).User.UserId
    score_of_person_voting = UserDetail.objects.get(UserId=loggedUser(request)).Score
    question_owner_score = Questions.objects.get(questionId=questionId).User.Score

    if question_posted_by == loggedUser(request):
        return JsonResponse({
            'Response': "You can't vote your own Question",
            'flag': False
        })
    elif score_of_person_voting < 10:
        return JsonResponse({
            'Response': "You don't have enough score!",
            'flag': False
        })

    if action == 'up':
        voteType = 1
        total_score_change = 10
    elif action == 'down':
        voteType = -1
        total_score_change = -10
    else:
        pass

    voteRecord = QuestionVotes.objects.using('second').filter(
        questionId=questionId, userId=loggedUser(request))
    upVoteRecord = voteRecord.using('second').filter(voteType=1)
    downVoteRecord = voteRecord.using('second').filter(voteType=-1)
    voteCount = Questions.objects.get(questionId=questionId).totalVotes

    if upVoteRecord and action == 'up':
        print('1')
        msg = 'Already Upvoted'
        count = 0
    elif upVoteRecord and action == 'down':
        print('2')
        msg = 'You downvoted this question'
        count = -1
        Questions.objects.filter(questionId=questionId).update(
            totalVotes=voteCount - 1)
        UserDetail.objects.filter(UserId=question_posted_by).update(
            Score=question_owner_score + total_score_change)
    elif downVoteRecord and action == 'down':
        print('3')
        msg = 'Already Downvoted'
        count = 0
    elif downVoteRecord and action == 'up':
        print('4')
        msg = 'You upvoted this question'
        count = 1
        Questions.objects.filter(questionId=questionId).update(
            totalVotes=voteCount + 1)
        UserDetail.objects.filter(UserId=question_posted_by).update(
            Score=question_owner_score + total_score_change)

    if voteRecord and action == 'up':
        QuestionVotes.objects.using('second').filter(
            questionId=questionId, userId=loggedUser(request)).update(voteType=Case(
                When(voteType=1, then=Value(1)),
                When(voteType=-1, then=Value(1)),
            ))
    elif voteRecord and action == 'down':
        QuestionVotes.objects.using('second').filter(
            questionId=questionId, userId=loggedUser(request)).update(voteType=Case(
                When(voteType=1, then=Value(-1)),
                When(voteType=-1, then=Value(-1)),
            ))
    else:
        QuestionVotes(questionId=questionId,
                      userId=loggedUser(request),
                      voteType=voteType).save(using='second')
        Questions.objects.filter(questionId=questionId).update(
            totalVotes=voteCount + voteType)
        msg = 'You ' + action + 'voted this question'
        count = voteType
        UserDetail.objects.filter(UserId=question_posted_by).update(
            Score=question_owner_score + total_score_change)

    outcome = {'Response': msg, 'count': count, 'action': action}
    return JsonResponse(outcome)
def _all(self):
    qs = self.annotate(
        diff=F("score_for") - F("score_against"),
        margin=Func(
            F("score_for") - F("score_against"),
            function="ABS",
            output_field=FloatField(),
        ),
    )
    qs = qs.select_related("team__club")
    qs = qs.annotate(
        division=Case(
            When(team__rank_division__isnull=False, then=F("team__rank_division")),
            When(
                team__division__rank_division__isnull=False,
                then=F("team__division__rank_division"),
            ),
        ),
        opponent_division=Case(
            When(
                opponent__rank_division__isnull=False,
                then=F("opponent__rank_division"),
            ),
            When(
                opponent__division__rank_division__isnull=False,
                then=F("opponent__division__rank_division"),
            ),
        ),
    )
    qs = qs.annotate(
        importance=Case(
            When(
                match__rank_importance__isnull=False,
                then=F("match__rank_importance"),
            ),
            When(
                match__stage_group__rank_importance__isnull=False,
                then=F("match__stage_group__rank_importance"),
            ),
            When(
                match__stage__rank_importance__isnull=False,
                then=F("match__stage__rank_importance"),
            ),
            When(
                match__stage__division__rank_importance__isnull=False,
                then=F("match__stage__division__rank_importance"),
            ),
            When(
                match__stage__division__season__rank_importance__isnull=False,
                then=F("match__stage__division__season__rank_importance"),
            ),
            When(
                match__stage__division__season__competition__rank_importance__isnull=False,
                then=F("match__stage__division__season__competition__rank_importance"),
            ),
            output_field=FloatField(),
        ),
    )

    RANK_POINTS_FUNC = getattr(
        settings,
        "TOURNAMENTCONTROL_RANK_POINTS_FUNC",
        "tournamentcontrol.competition.rank.points_func",
    )
    rank_points = import_string(RANK_POINTS_FUNC)
    qs = qs.annotate(
        rank_points=ExpressionWrapper(
            rank_points() * F("importance"),
            output_field=FloatField(),
        ),
    )
    return qs
def answervote(request):
    questionid = request.POST.get('questionId')
    answerid = request.POST.get('answerId')
    action = request.POST.get('action')
    score_of_person_voting = UserDetail.objects.get(UserId=loggedUser(request)).Score
    questionObj = Questions.objects.get(questionId=questionid)
    allAnswers = questionObj.answers
    for ans in allAnswers:
        if str(ans['answerId']) == answerid:
            voteCount = ans['totalVotes']
            answer_posted_by = ans['User']
            answer_owner_score = UserDetail.objects.get(UserId=answer_posted_by).Score
            break

    if answer_posted_by == loggedUser(request):
        return JsonResponse({
            'Response': "You can't vote your own Answer",
            'flag': False
        })
    elif score_of_person_voting < 10:
        return JsonResponse({
            'Response': "You don't have enough score!",
            'flag': False
        })

    if action == 'up':
        voteType = 1
        total_score_change = 10
    elif action == 'down':
        voteType = -1
        total_score_change = -10
    else:
        pass

    voteRecord = AnswerVotes.objects.using('second').filter(
        answerId=answerid, userId=loggedUser(request))
    upVoteRecord = voteRecord.using('second').filter(voteType=1)
    downVoteRecord = voteRecord.using('second').filter(voteType=-1)

    if upVoteRecord and action == 'up':
        print('1')
        msg = 'Already Upvoted'
        count = 0
    elif upVoteRecord and action == 'down':
        print('2')
        msg = 'You downvoted this answer'
        count = -1
        ans['totalVotes'] -= 1
        questionObj.save()
        UserDetail.objects.filter(UserId=answer_posted_by).update(
            Score=answer_owner_score + total_score_change)
    elif downVoteRecord and action == 'down':
        print('3')
        msg = 'Already Downvoted'
        count = 0
    elif downVoteRecord and action == 'up':
        print('4')
        msg = 'You upvoted this answer'
        count = 1
        ans['totalVotes'] += 1
        questionObj.save()
        UserDetail.objects.filter(UserId=answer_posted_by).update(
            Score=answer_owner_score + total_score_change)

    if voteRecord and action == 'up':
        AnswerVotes.objects.using('second').filter(
            answerId=answerid, userId=loggedUser(request)).update(voteType=Case(
                When(voteType=1, then=Value(1)),
                When(voteType=-1, then=Value(1)),
            ))
    elif voteRecord and action == 'down':
        AnswerVotes.objects.using('second').filter(
            answerId=answerid, userId=loggedUser(request)).update(voteType=Case(
                When(voteType=1, then=Value(-1)),
                When(voteType=-1, then=Value(-1)),
            ))
    else:
        AnswerVotes(answerId=answerid,
                    userId=loggedUser(request),
                    voteType=voteType).save(using='second')
        ans['totalVotes'] += voteType
        questionObj.save()
        msg = 'You ' + action + 'voted this answer'
        count = voteType
        UserDetail.objects.filter(UserId=answer_posted_by).update(
            Score=answer_owner_score + total_score_change)

    return JsonResponse({'Response': msg, 'count': count, 'action': action})
def get(self, request, **kwargs):
    filter_form = self.filter_form_class(request.GET)
    search_form = self.search_form_class(request.GET)
    sort_form = self.sort_form_class(request.GET)

    def GetAttractionsByCategory(category_ids_list):
        attr_ids = []
        attr_list = []
        for cat_id in category_ids_list:
            for obj in Category.objects.get(pk=cat_id).attraction_set.all():
                attr_list.append(obj)
        for obj in attr_list:
            attr_ids.append(obj.id)
        return attr_ids

    def GetAttractionsByPrice(price_min, price_max):
        attr_ids = []
        for ticket in Ticket.objects.filter(price__gte=price_min, price__lte=price_max):
            attr_ids.append(ticket.attraction.id)
        return attr_ids

    if search_form.is_valid():
        search_fraze = request.GET.get('search_fraze', "")
        attractions = Attraction.objects.filter(
            Q(name__icontains=search_fraze) |
            Q(description__icontains=search_fraze))
        return render(
            request, self.template_name, {
                "filter_form": filter_form,
                "attractions_obj": attractions,
                "categories": Category.objects.all(),
                'page': 'attraction'
            })

    sort_keys = [key for key in request.GET.getlist('sort_key', [])]
    if len(sort_keys) != 0:
        found = False
        for key in sort_keys:
            if key:
                if key == "price" or key == "-price":
                    attr_ids = [
                        attr.attraction.id
                        for attr in Ticket.objects.all().order_by(key)
                    ]
                    tickets_order = Case(*[
                        When(pk=pk, then=pos)
                        for pos, pk in enumerate(attr_ids)
                    ])
                    attractions = Attraction.objects.filter(
                        id__in=attr_ids).order_by(tickets_order)
                else:
                    attractions = Attraction.objects.all().order_by(key)
                found = True
                break
        if not found:
            attractions = Attraction.objects.all().order_by('name')
        return render(
            request, self.template_name, {
                "filter_form": filter_form,
                "attractions_obj": attractions,
                "categories": Category.objects.all(),
                'page': 'attraction'
            })

    if filter_form.is_valid():
        if request.GET.getlist('categories'):
            category_ids = [
                cat_id for cat_id in request.GET.getlist('categories')
            ]
        else:
            category_ids = [cat_id.id for cat_id in Category.objects.all()]
        price_min = request.GET.get('price_min', 0)
        price_max = request.GET.get('price_max', 1000)
        time_min = request.GET.get('time_min', 0)
        time_max = request.GET.get('time_max', 1000)
        attractions = Attraction.objects.filter(
            id__in=GetAttractionsByCategory(category_ids)).filter(
            id__in=GetAttractionsByPrice(price_min, price_max)).filter(
            time_minutes__gte=time_min,
            time_minutes__lte=time_max).order_by('name')
        return render(
            request, self.template_name, {
                "filter_form": filter_form,
                "attractions_obj": attractions,
                "categories": Category.objects.all(),
                'page': 'attraction'
            })

    return render(
        request, self.template_name, {
            "filter_form": filter_form,
            "attractions_obj": Attraction.objects.all().order_by('name'),
            "categories": Category.objects.all(),
            'page': 'attraction'
        })
def add_culling_percentage(self):
    data = dict()
    data['ws3_padej_percentage'] = Case(
        When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
        When(total_born_alive__gt=0,
             then=ExpressionWrapper(
                 F('ws3_padej_quantity') * 100.0 / F('total_born_alive'),
                 output_field=models.FloatField())),
        output_field=models.FloatField())

    data['ws3_prirezka_percentage'] = Case(
        When(Q(total_born_alive__isnull=True) | Q(total_born_alive=0), then=0.0),
        When(total_born_alive__gt=0,
             then=ExpressionWrapper(
                 F('ws3_prirezka_quantity') * 100.0 / F('total_born_alive'),
                 output_field=models.FloatField())),
        output_field=models.FloatField())

    for ws_number, place_number in zip(
            [4, 8, 5, 6, 7], ['3_4', '4_8', '8_5', '8_6', '8_7']):
        lookup1 = {
            f'week_weight_qnty_{place_number}__isnull': True,
        }
        lookup2 = {
            f'week_weight_qnty_{place_number}': 0,
        }
        lookup3 = {
            f'week_weight_qnty_{place_number}__gt': 0,
        }
        data[f'ws{ws_number}_padej_percentage'] = Case(
            When(Q(**lookup1) | Q(**lookup2), then=0.0),
            When(**lookup3,
                 then=ExpressionWrapper(
                     F(f'ws{ws_number}_padej_quantity') * 100.0 /
                     F(f'week_weight_qnty_{place_number}'),
                     output_field=models.FloatField())),
            output_field=models.FloatField())
        if ws_number != 4:
            data[f'ws{ws_number}_vinuzhd_percentage'] = Case(
                When(Q(**lookup1) | Q(**lookup2), then=0.0),
                When(**lookup3,
                     then=ExpressionWrapper(
                         F(f'ws{ws_number}_vinuzhd_quantity') * 100.0 /
                         F(f'week_weight_qnty_{place_number}'),
                         output_field=models.FloatField())),
                output_field=models.FloatField())
        if ws_number in [4, 8]:
            data[f'ws{ws_number}_prirezka_percentage'] = Case(
                When(Q(**lookup1) | Q(**lookup2), then=0.0),
                When(**lookup3,
                     then=ExpressionWrapper(
                         F(f'ws{ws_number}_prirezka_quantity') * 100.0 /
                         F(f'week_weight_qnty_{place_number}'),
                         output_field=models.FloatField())),
                output_field=models.FloatField())

    return self.annotate(**data)
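# Hedged usage sketch (assumes this method lives on a custom QuerySet of a
# hypothetical `WeeklyReport` model): after the call, every percentage is an
# ordinary annotated float on each row.
for report in WeeklyReport.objects.all().add_culling_percentage():
    print(report.ws3_padej_percentage, report.ws8_prirezka_percentage)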
def test_case_aggregate(self):
    agg = Sum(
        Case(When(friends__age=40, then=F('friends__age'))),
        filter=Q(friends__name__startswith='test'),
    )
    self.assertEqual(Author.objects.aggregate(age=agg)['age'], 80)
def get_other_units(unit):
    """Returns other units to show while translating."""
    result = {
        "total": 0,
        "skipped": False,
        "same": [],
        "matching": [],
        "context": [],
        "source": [],
    }

    allow_merge = False
    untranslated = False
    translation = unit.translation
    component = translation.component
    propagation = component.allow_translation_propagation
    same = None

    if unit.source and unit.context:
        match = Q(source=unit.source) & Q(context=unit.context)
        if component.has_template():
            query = Q(source=unit.source) | Q(context=unit.context)
        else:
            query = Q(source=unit.source)
    elif unit.source:
        match = Q(source=unit.source) & Q(context="")
        query = Q(source=unit.source)
    elif unit.context:
        match = Q(context=unit.context)
        query = Q(context=unit.context)
    else:
        return result

    units = (Unit.objects.filter(
        query,
        translation__component__project=component.project,
        translation__language=translation.language,
    ).annotate(matches_current=Case(
        When(condition=match, then=1),
        default=0,
        output_field=IntegerField())).order_by("-matches_current"))

    units_count = units.count()
    # Is it only this unit?
    if units_count == 1:
        return result

    result["total"] = units_count
    result["skipped"] = units_count > 20

    for item in units[:20]:
        item.allow_merge = item.differently_translated = (
            item.translated and item.target != unit.target)
        item.is_propagated = (
            propagation
            and item.translation.component.allow_translation_propagation
            and item.translation.plural_id == translation.plural_id
            and item.source == unit.source
            and item.context == unit.context)
        untranslated |= not item.translated
        allow_merge |= item.allow_merge
        if item.pk == unit.pk:
            same = item
            result["same"].append(item)
        elif item.source == unit.source and item.context == unit.context:
            result["matching"].append(item)
        elif item.source == unit.source:
            result["source"].append(item)
        elif item.context == unit.context:
            result["context"].append(item)

    # Slightly different logic to allow applying the current translation to
    # the propagated strings
    if same is not None:
        same.allow_merge = ((untranslated or allow_merge) and same.translated
                            and propagation)
        allow_merge |= same.allow_merge

    result["total"] = sum(
        len(result[x]) for x in ("matching", "source", "context"))
    result["allow_merge"] = allow_merge

    return result
def biodiversity_data(self):
    if not self.search_filters:
        return {}
    search = Search(self.search_filters)
    collection_results = search.process_search()
    biodiversity_data = dict()
    groups = TaxonGroup.objects.filter(
        category=TaxonomicGroupCategory.SPECIES_MODULE.name
    )
    for group in groups:
        group_data = dict()
        group_data[self.GROUP_ICON] = group.logo.name
        group_data[self.MODULE] = group.id
        biodiversity_data[group.name] = group_data
        taxa = group.taxonomies.all()
        taxa_children = self.get_all_taxa_children(taxa)
        taxa_children_ids = list(
            taxa_children.values_list('id', flat=True)
        )
        group_records = collection_results.filter(
            taxonomy__in=taxa_children_ids
        )
        group_records_id = list(group_records.values_list('id', flat=True))
        if group_records_id:
            self.is_sass_exist = SiteVisitTaxon.objects.filter(
                id__in=group_records_id).exists()
        group_data[self.GROUP_OCCURRENCES] = group_records.count()
        group_data[self.GROUP_SITES] = group_records.distinct(
            'site'
        ).count()
        group_data[self.GROUP_NUM_OF_TAXA] = group_records.distinct(
            'taxonomy'
        ).count()
        group_data[self.GROUP_ENDEMISM] = group_records.annotate(
            name=Case(When(taxonomy__endemism__isnull=False,
                           then=F('taxonomy__endemism__name')),
                      default=Value('Unspecified'))
        ).values(
            'name'
        ).annotate(
            count=Count('name')
        ).values(
            'name', 'count'
        ).order_by('name')
        group_origins = group_records.annotate(
            name=Case(When(category__isnull=False,
                           then=F('category')),
                      default=Value('Unspecified'))
        ).values(
            'name'
        ).annotate(
            count=Count('name')
        ).values(
            'name', 'count'
        ).order_by('name')
        if group_origins:
            category = dict(BiologicalCollectionRecord.CATEGORY_CHOICES)
            for group_origin in group_origins:
                if group_origin['name'] in category:
                    group_origin['name'] = category[group_origin['name']]
        group_data[self.GROUP_ORIGIN] = group_origins
        all_cons_status = group_records.annotate(
            name=Case(When(taxonomy__iucn_status__isnull=False,
                           then=F('taxonomy__iucn_status__category')),
                      default=Value('Unspecified'))
        ).values(
            'name'
        ).annotate(
            count=Count('name')
        ).values(
            'name', 'count'
        ).order_by('name')
        if all_cons_status:
            category = dict(IUCNStatus.CATEGORY_CHOICES)
            for cons_status in all_cons_status:
                if cons_status['name'] in category:
                    cons_status['name'] = category[cons_status['name']]
        group_data[self.GROUP_CONS_STATUS] = all_cons_status
    return biodiversity_data
def filter_by(self, config, column, default=0):
    return Case(When(Q(**config), then=F(column)),
                default=default,
                output_field=IntegerField())
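# Minimal usage sketch (hypothetical `stats` helper instance and `Sale` model):
# sum `amount` only over rows matching the config, counting 0 for the rest.
from django.db.models import Sum

total_2021 = Sale.objects.aggregate(
    total=Sum(stats.filter_by({'created__year': 2021}, 'amount'))
)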
def set_initial_value_for_is_muted(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    Subscription = apps.get_model("zerver", "Subscription")
    Subscription.objects.update(is_muted=Case(
        When(in_home_view=True, then=Value(False)),
        When(in_home_view=False, then=Value(True)),
    ))
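# Since is_muted is just the negation of in_home_view, the same backfill could
# arguably be written without Case/When on Django >= 3.1 (a sketch under that
# assumption, and assuming the column is non-null; the Case/When version above
# stays portable to older Django versions):
from django.db.models import BooleanField, ExpressionWrapper, Q

Subscription.objects.update(
    is_muted=ExpressionWrapper(~Q(in_home_view=True), output_field=BooleanField())
)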
def construct_loan_queryset(self, faba_grouping_column, base_model, base_model_column):
    grouping_key = F(faba_grouping_column) if isinstance(faba_grouping_column, str) else faba_grouping_column

    base_values = With(
        FinancialAccountsByAwards.objects.filter(
            Q(award__type__in=loan_type_mapping),
            self.all_closed_defc_submissions,
            self.is_in_provided_def_codes,
        )
        .annotate(
            grouping_key=grouping_key,
            total_loan_value=F("award__total_loan_value"),
            reporting_fiscal_year=F("submission__reporting_fiscal_year"),
            reporting_fiscal_period=F("submission__reporting_fiscal_period"),
            quarter_format_flag=F("submission__quarter_format_flag"),
        )
        .filter(grouping_key__isnull=False)
        .values(
            "grouping_key",
            "financial_accounts_by_awards_id",
            "award_id",
            "transaction_obligated_amount",
            "gross_outlay_amount_by_award_cpe",
            "reporting_fiscal_year",
            "reporting_fiscal_period",
            "quarter_format_flag",
            "total_loan_value",
        ),
        "base_values",
    )

    q = Q()
    for sub in final_submissions_for_all_fy():
        q |= (
            Q(reporting_fiscal_year=sub.fiscal_year)
            & Q(quarter_format_flag=sub.is_quarter)
            & Q(reporting_fiscal_period=sub.fiscal_period)
        )

    aggregate_faba = With(
        base_values.queryset()
        .values("grouping_key")
        .annotate(
            obligation=Coalesce(Sum("transaction_obligated_amount"), 0),
            outlay=Coalesce(
                Sum(Case(When(q, then=F("gross_outlay_amount_by_award_cpe")), default=Value(0))), 0
            ),
        )
        .values("grouping_key", "obligation", "outlay"),
        "aggregate_faba",
    )

    distinct_awards = With(
        base_values.queryset().values("grouping_key", "award_id", "total_loan_value").distinct(),
        "distinct_awards",
    )

    aggregate_awards = With(
        distinct_awards.queryset()
        .values("grouping_key")
        .annotate(award_count=Count("award_id"), face_value_of_loan=Coalesce(Sum("total_loan_value"), 0))
        .values("grouping_key", "award_count", "face_value_of_loan"),
        "aggregate_awards",
    )

    return Bunch(
        award_count_column=aggregate_awards.col.award_count,
        obligation_column=aggregate_faba.col.obligation,
        outlay_column=aggregate_faba.col.outlay,
        face_value_of_loan_column=aggregate_awards.col.face_value_of_loan,
        queryset=aggregate_awards.join(
            aggregate_faba.join(base_model, **{base_model_column: aggregate_faba.col.grouping_key}),
            **{base_model_column: aggregate_awards.col.grouping_key},
        )
        .with_cte(base_values)
        .with_cte(aggregate_faba)
        .with_cte(distinct_awards)
        .with_cte(aggregate_awards),
    )
def get(self, request: Request, organization) -> Response: """ Retrieve an Organization's Dashboards ````````````````````````````````````` Retrieve a list of dashboards that are associated with the given organization. If on the first page, this endpoint will also include any pre-built dashboards that haven't been replaced or removed. :pparam string organization_slug: the slug of the organization the dashboards belongs to. :qparam string query: the title of the dashboard being searched for. :auth: required """ if not features.has("organizations:dashboards-basic", organization, actor=request.user): return Response(status=404) dashboards = Dashboard.objects.filter(organization_id=organization.id).select_related( "created_by" ) query = request.GET.get("query") if query: dashboards = dashboards.filter(title__icontains=query) prebuilt = Dashboard.get_prebuilt_list(organization, query) sort_by = request.query_params.get("sort") if sort_by and sort_by.startswith("-"): sort_by, desc = sort_by[1:], True else: desc = False if sort_by == "title": order_by = [ "-title" if desc else "title", "-date_added", ] elif sort_by == "dateCreated": order_by = "-date_added" if desc else "date_added" elif sort_by == "mostPopular": order_by = [ "visits" if desc else "-visits", "-date_added", ] elif sort_by == "recentlyViewed": order_by = "last_visited" if desc else "-last_visited" elif sort_by == "mydashboards": order_by = [ Case(When(created_by_id=request.user.id, then=-1), default="created_by_id"), "-date_added", ] elif sort_by == "myDashboardsAndRecentlyViewed": order_by = [ Case(When(created_by_id=request.user.id, then=-1), default=1), "-last_visited", ] else: order_by = "title" if not isinstance(order_by, list): order_by = [order_by] dashboards = dashboards.order_by(*order_by) list_serializer = DashboardListSerializer() def handle_results(results): serialized = [] dashboards = [] for item in results: if isinstance(item, dict): cloned = item.copy() widgets = cloned.pop("widgets", []) cloned["widgetDisplay"] = [w["displayType"] for w in widgets] serialized.append(cloned) else: dashboards.append(item) serialized.extend(serialize(dashboards, request.user, serializer=list_serializer)) return serialized return self.paginate( request=request, sources=[prebuilt, dashboards], paginator_cls=ChainPaginator, on_results=handle_results, )
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)

    if not (self.object.ended or self.can_edit):
        raise Http404()

    queryset = Submission.objects.filter(contest_object=self.object)

    ac_count = Count(
        Case(When(result='AC', then=Value(1)), output_field=IntegerField()))
    ac_rate = CombinedExpression(ac_count / Count('problem'),
                                 '*',
                                 Value(100.0),
                                 output_field=FloatField())

    status_count_queryset = list(
        queryset.values('problem__code', 'result').annotate(
            count=Count('result')).values_list('problem__code', 'result', 'count'),
    )
    labels, codes = [], []
    contest_problems = self.object.contest_problems.order_by(
        'order').values_list('problem__name', 'problem__code')
    if contest_problems:
        labels, codes = zip(*contest_problems)
    num_problems = len(labels)
    status_counts = [[] for i in range(num_problems)]
    for problem_code, result, count in status_count_queryset:
        if problem_code in codes:
            status_counts[codes.index(problem_code)].append((result, count))

    result_data = defaultdict(partial(list, [0] * num_problems))
    for i in range(num_problems):
        for category in _get_result_data(defaultdict(
                int, status_counts[i]))['categories']:
            result_data[category['code']][i] = category['count']

    stats = {
        'problem_status_count': {
            'labels': labels,
            'datasets': [{
                'label': name,
                'backgroundColor': settings.DMOJ_STATS_SUBMISSION_RESULT_COLORS[name],
                'data': data,
            } for name, data in result_data.items()],
        },
        'problem_ac_rate': get_bar_chart(
            queryset.values('contest__problem__order', 'problem__name')
                    .annotate(ac_rate=ac_rate)
                    .order_by('contest__problem__order')
                    .values_list('problem__name', 'ac_rate'),
        ),
        'language_count': get_pie_chart(
            queryset.values('language__name')
                    .annotate(count=Count('language__name'))
                    .filter(count__gt=0)
                    .order_by('-count')
                    .values_list('language__name', 'count'),
        ),
        'language_ac_rate': get_bar_chart(
            queryset.values('language__name')
                    .annotate(ac_rate=ac_rate)
                    .filter(ac_rate__gt=0)
                    .values_list('language__name', 'ac_rate'),
        ),
    }

    context['stats'] = mark_safe(json.dumps(stats))

    return context
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    self.object.source = self.object.source.split(',')[0]

    context['config'] = VANILLA_UI_CONFIG_FOR_RATINGS
    context['genres'] = ', '.join(genre.title for genre in self.object.genre.all())

    if self.request.user.is_authenticated:
        context['suggestion_form'] = SuggestionForm(
            work=self.object,
            instance=Suggestion(user=self.request.user, work=self.object))

    context['rating'] = current_user_rating(self.request, self.object)

    context['references'] = []
    for reference in self.object.reference_set.all():
        for domain, name in REFERENCE_DOMAINS:
            if reference.url.startswith(domain):
                context['references'].append((reference.url, name))

    nb = Counter(
        Rating.objects.filter(work=self.object).values_list('choice', flat=True))
    labels = OrderedDict([
        ('favorite', 'Ajoutés aux favoris'),
        ('like', 'Ont aimé'),
        ('neutral', 'Neutre'),
        ('dislike', 'N\'ont pas aimé'),
        ('willsee', 'Ont envie de voir'),
        ('wontsee', 'N\'ont pas envie de voir'),
    ])
    seen_ratings = {'favorite', 'like', 'neutral', 'dislike'}
    total = sum(nb.values())
    if total > 0:
        context['stats'] = []
        seen_total = sum(nb[rating] for rating in seen_ratings)
        for rating, label in labels.items():
            if seen_total > 0 and rating not in seen_ratings:
                continue
            context['stats'].append({
                'value': nb[rating],
                'colors': RATING_COLORS[rating],
                'label': label,
            })
        context['seen_percent'] = round(100 * seen_total / float(total))

    events = self.object.event_set \
        .filter(date__gte=timezone.now()) \
        .annotate(nb_attendees=Sum(Case(
            When(attendee__attending=True, then=Value(1)),
            default=Value(0),
            output_field=IntegerField(),
        )))
    if len(events) > 0:
        my_events = {}
        if self.request.user.is_authenticated:
            my_events = dict(
                self.request.user.attendee_set
                    .filter(event__in=events)
                    .values_list('event_id', 'attending'))
        context['events'] = [{
            'id': event.id,
            'attending': my_events.get(event.id, None),
            'type': event.get_event_type_display(),
            'channel': event.channel,
            'date': event.get_date(),
            'link': event.link,
            'location': event.location,
            'nb_attendees': event.nb_attendees,
        } for event in events]

    return context
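# Hedged sketch of the attendee count above: Sum(Case(...)) emits 1 per
# attending attendee and 0 otherwise, i.e. a conditional count across the
# reverse relation. On Django 2.0+ the same thing can be written as
# Count('attendee', filter=Q(attendee__attending=True)). The `attendee`
# relation and `attending` flag follow the view and are assumptions about
# the schema.
from django.db.models import Case, IntegerField, Sum, Value, When

def with_attendee_counts(events):
    return events.annotate(nb_attendees=Sum(Case(
        When(attendee__attending=True, then=Value(1)),
        default=Value(0),
        output_field=IntegerField(),
    )))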
def sort_by_attribute(self, attribute_pk: Union[int, str], ascending: bool = True):
    """Sort a query set by the values of the given product attribute.

    :param attribute_pk: The database ID (must be a number) of the attribute
                         to sort by.
    :param ascending: The sorting direction.
    """
    qs: models.QuerySet = self

    # Retrieve all the products' attribute data IDs (assignments) and
    # product types that have the given attribute associated to them.
    associated_values = tuple(
        AttributeProduct.objects.filter(attribute_id=attribute_pk)
        .values_list("pk", "product_type_id"))

    if not associated_values:
        if not ascending:
            return qs.reverse()
        return qs

    attribute_associations, product_types_associated_to_attribute = zip(
        *associated_values)

    qs = qs.annotate(
        # A FilteredRelation used to retrieve the attribute data (singular)
        # of each product. Refer to `AttributeProduct`.
        filtered_attribute=FilteredRelation(
            relation_name="attributes",
            condition=Q(attributes__assignment_id__in=attribute_associations),
        ),
        # Implicit `GROUP BY` required for the `StringAgg` aggregation.
        grouped_ids=Count("id"),
        # String aggregation of the attribute's values to efficiently sort them.
        concatenated_values=Case(
            # If the product has no association data but has the given attribute
            # associated to its product type, then consider the concatenated
            # values as empty (non-null).
            When(
                Q(product_type_id__in=product_types_associated_to_attribute)
                & Q(filtered_attribute=None),
                then=models.Value(""),
            ),
            default=StringAgg(
                F("filtered_attribute__values__name"),
                delimiter=",",
                ordering=[
                    f"filtered_attribute__values__{field_name}"
                    for field_name in AttributeValue._meta.ordering or []
                ],
            ),
            output_field=models.CharField(),
        ),
        concatenated_values_order=Case(
            # Make the products having no such attribute be last in the sorting.
            When(concatenated_values=None, then=2),
            # Put the products having an empty attribute value below the
            # products that do have values.
            When(concatenated_values="", then=1),
            # Products that have an attribute value always sort first.
            default=0,
            output_field=models.IntegerField(),
        ),
    )

    # Sort by concatenated_values_order first, then by the concatenated
    # attribute values within each group (0, 1, 2), and finally by name so
    # that products with identical (or missing) values keep a stable order.
    qs = qs.order_by("concatenated_values_order", "concatenated_values", "name")

    # Descending sorting.
    if not ascending:
        return qs.reverse()
    return qs
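# Hedged sketch of the three-bucket ordering trick above: a Case ranks NULL
# values last (2), empty strings next (1), and real values first (0), so a
# single order_by pushes "missing" rows to the bottom. `field` names any
# nullable CharField; everything here is illustrative, not Saleor's API.
from django.db.models import Case, IntegerField, QuerySet, When

def order_missing_last(qs: QuerySet, field: str) -> QuerySet:
    rank = Case(
        When(**{field: None}, then=2),  # no value at all
        When(**{field: ""}, then=1),    # empty value
        default=0,                      # real value
        output_field=IntegerField(),
    )
    return qs.annotate(_rank=rank).order_by("_rank", field)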
def fetch_pokemons(**kwargs):
    # Pokedex stats are displayed as current / total
    pokedex_stats = {'current': 0, 'total': 0, 'filters': 0, 'anonymous': 1}
    # @TODO: Remove hardcoding and make it dynamic from the model
    pokemon_filters = ("is_owned", "is_shiny", "is_pokeball", "is_language",
                       "is_iv", "is_original_trainer", "is_gender")
    pagination = 40
    qs_annotate = {}
    qs_annotate_total = {}
    qs_values = ["name", "number", "variant__name", "variant__number"]
    qs_filters = Q()
    qs_filters_hidden = Q()
    qs_order_by = "number"

    # Dynamically add every language for the name and the variant name
    for single_language in MODELTRANSLATION_LANGUAGES:
        qs_values.append("name_" + single_language)
        qs_values.append("variant__name_" + single_language)

    if "pokemon_region" in kwargs and kwargs['pokemon_region'] != "":
        qs_filters.add(Q(pokemonregion__region__slug=kwargs['pokemon_region']), Q.AND)
        qs_values.append("pokemonregion__number")
        qs_order_by = "pokemonregion__number"

    if "pokemon_type" in kwargs:
        if kwargs['pokemon_type'] == "pokemons":
            qs_filters.add(Q(variant__isnull=True), Q.AND)
        elif kwargs['pokemon_type'] == "forms":
            qs_filters.add(Q(variant__isnull=False), Q.AND)

    if "pokemon_number" in kwargs:
        qs_filters.add(Q(number=kwargs['pokemon_number']), Q.AND)

    if "variant__number" in kwargs:
        qs_filters.add(Q(variant__number=kwargs['variant__number']), Q.AND)

    # If logged in, left join with the UserPokemon data of that user
    if "user" in kwargs and kwargs['user'].is_authenticated:
        for single_filter in pokemon_filters:
            qs_values.append("t__" + single_filter)
        qs_annotate['t'] = FilteredRelation(
            'userpokemon',
            condition=(Q(userpokemon__user=kwargs['user'])
                       | Q(userpokemon__isnull=True)))

    # Apply search
    if "search_text" in kwargs and kwargs['search_text'] != "":
        search_number = kwargs['search_text']
        # Pad numbers lower than 100 to match our format: 1 -> 001
        if len(search_number) < 3:
            search_number = search_number.zfill(3)
        qs_search = Q()
        if "pokemon_region" in kwargs and kwargs['pokemon_region'] != "":
            qs_search.add(Q(**{'pokemonregion__number': search_number}), Q.OR)
        else:
            qs_search.add(Q(**{'number': search_number}), Q.OR)
        for single_language in MODELTRANSLATION_LANGUAGES:
            qs_search.add(
                Q(**{'name_' + single_language + '__contains': kwargs['search_text']}),
                Q.OR)
        qs_filters.add(qs_search, Q.AND)

    # Apply settings filters
    if "settings_filters" in kwargs and len(kwargs['settings_filters']) > 0:
        pokedex_stats['filters'] = 1
        for single_filter in kwargs['settings_filters']:
            qs_filters_hidden.add(
                Q(**{'t__' + single_filter: False}) | Q(t__isnull=True), Q.AND)

    # Get the Pokemons matching all the different settings
    pokemons_qs = get_pokemon_queryset(qs_annotate,
                                       (qs_filters & qs_filters_hidden),
                                       qs_values, qs_order_by)

    # Pagination
    pokemons_paginator = None
    if "page" in kwargs and kwargs['page'] is not None:
        paginator = Paginator(pokemons_qs, pagination)
        pokedex_stats['current'] = paginator.count
        pokedex_stats['total'] = paginator.count
        try:
            pokemons_paginator = paginator.page(kwargs['page'])
        except PageNotAnInteger:
            pokemons_paginator = paginator.page(1)
        except EmptyPage:
            pokemons_paginator = paginator.page(paginator.num_pages)
    else:
        pokemons_paginator = pokemons_qs
        pokedex_stats['current'] = len(pokemons_qs)

    pokemons_list = []
    for single_pokemon in pokemons_paginator:
        pokemon = single_pokemon
        pokemon['visible_number'] = pokemon['number'][:3]
        if 'pokemon_language' in kwargs and kwargs['pokemon_language'] != "en":
            translated_name = pokemon['name_' + kwargs['pokemon_language']]
            if translated_name != "":
                pokemon['name'] = translated_name
        if 'pokemonregion__number' in single_pokemon \
                and single_pokemon['pokemonregion__number'] is not None:
            pokemon['visible_number'] = single_pokemon['pokemonregion__number'][:3]
        if single_pokemon['variant__name'] is not None:
            variant_name = single_pokemon['variant__name']
            if 'pokemon_language' in kwargs and kwargs['pokemon_language'] != "en":
                translated_name = pokemon['variant__name_' + kwargs['pokemon_language']]
                if translated_name != "":
                    variant_name = translated_name
            pokemon['name'] = single_pokemon['name'].replace("#NAME#", variant_name)

        # Initialize the filters to default values
        for single_filter in pokemon_filters:
            pokemon[single_filter] = None
            # Update the filter if it is found in the database
            if "t__" + single_filter in single_pokemon:
                pokemon[single_filter] = single_pokemon["t__" + single_filter]

        pokemons_list.append(pokemon)

    # If the user is logged in, get the totals of the filters
    if "user" in kwargs and kwargs['user'].is_authenticated:
        # Will hold the number of Pokemons matching all those filters
        # (except the filtered one)
        qs_aggregate = {'total_pokemons': Count('id', distinct=True)}

        # Add the count of each filter
        for single_filter in pokemon_filters:
            qs_aggregate['count_' + single_filter] = Count(
                Case(When(**{'userpokemon__' + single_filter: True}, then=Value(1))),
                filter=Q(userpokemon__user=kwargs['user']))

        pokemons_total_qs = get_pokemon_queryset(
            qs_annotate, qs_filters, qs_values, qs_order_by).aggregate(**qs_aggregate)

        # Get all the values (from the filters and the total)
        pokedex_stats['anonymous'] = 0
        pokedex_stats['total'] = pokemons_total_qs['total_pokemons']
        for single_filter in pokemon_filters:
            pokedex_stats['count_' + single_filter] = \
                pokemons_total_qs['count_' + single_filter]

    return {
        "pokemons": pokemons_list,
        "paginator": pokemons_paginator,
        "pokedex_stats": pokedex_stats,
    }
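# Hedged sketch of the per-filter totals computed above: each aggregate is a
# Count over a Case that only yields a value for matching rows, further
# restricted to the current user's rows with the aggregate-level filter=
# argument (Django 2.0+). The `userpokemon` relation and flag names follow
# the function above and are assumptions about the schema.
from django.db.models import Case, Count, Q, Value, When

def owned_counts(pokemons, user, flags=("is_owned", "is_shiny")):
    aggregates = {
        "count_" + flag: Count(
            Case(When(**{"userpokemon__" + flag: True}, then=Value(1))),
            filter=Q(userpokemon__user=user))
        for flag in flags
    }
    aggregates["total_pokemons"] = Count("id", distinct=True)
    # One query returning the grand total plus one counter per flag.
    return pokemons.aggregate(**aggregates)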
def get_severity_count(id, table):
    # Pick the base queryset for the requested scope; the engagement view
    # only counts active, verified, non-duplicate findings.
    if table == "test":
        findings = Finding.objects.filter(test=id)
    elif table == "engagement":
        findings = Finding.objects.filter(test__engagement=id, active=True,
                                          verified=True, duplicate=False)
    elif table == "product":
        findings = Finding.objects.filter(test__engagement__product=id)

    # One pass over the findings: each Sum(Case(...)) yields 1 for a matching
    # severity and NULL otherwise, so the aggregate is a conditional count.
    counts = findings.prefetch_related('test__engagement__product').aggregate(
        total=Sum(Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                            then=Value(1)),
                       output_field=IntegerField())),
        critical=Sum(Case(When(severity='Critical', then=Value(1)),
                          output_field=IntegerField())),
        high=Sum(Case(When(severity='High', then=Value(1)),
                      output_field=IntegerField())),
        medium=Sum(Case(When(severity='Medium', then=Value(1)),
                        output_field=IntegerField())),
        low=Sum(Case(When(severity='Low', then=Value(1)),
                     output_field=IntegerField())),
        info=Sum(Case(When(severity='Info', then=Value(1)),
                      output_field=IntegerField())),
    )

    # Sum aggregates come back as None when no rows match.
    critical = counts["critical"] or 0
    high = counts["high"] or 0
    medium = counts["medium"] or 0
    low = counts["low"] or 0
    info = counts["info"] or 0
    total = critical + high + medium + low + info

    display_counts = [
        "Critical: " + str(critical),
        "High: " + str(high),
        "Medium: " + str(medium),
        "Low: " + str(low),
        "Info: " + str(info),
    ]

    if table == "test":
        display_counts.append("Total: " + str(total) + " Findings")
    elif table == "engagement":
        display_counts.append("Total: " + str(total) + " Active, Verified Findings")
    elif table == "product":
        display_counts.append("Total: " + str(total) + " Active Findings")

    return ", ".join(str(item) for item in display_counts)
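# Hedged sketch: on Django 2.0+ the Sum(Case(...)) severity counters above
# collapse to Count with an aggregate-level filter, which reads more directly
# and returns 0 instead of None when nothing matches. The same Finding-style
# `severity` field is assumed.
from django.db.models import Count, Q

def severity_counts(findings):
    return findings.aggregate(
        critical=Count('id', filter=Q(severity='Critical')),
        high=Count('id', filter=Q(severity='High')),
        medium=Count('id', filter=Q(severity='Medium')),
        low=Count('id', filter=Q(severity='Low')),
        info=Count('id', filter=Q(severity='Info')),
    )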
def get_facet(qs=None, request=None, use_cache=True, hash_key=None):
    """
    Gets an annotated list from a queryset.
    :param qs: recipe queryset to build facets from
    :param request: the web request that contains the necessary query parameters
    :param use_cache: will find results in the cache, if any, and return them,
        or an empty list; will save the list of recipe IDs in the cache for
        future processing
    :param hash_key: the cache key of the recipe list to process;
        only evaluated if the use_cache parameter is false
    """
    facets = {}
    recipe_list = []
    cache_timeout = 600

    if use_cache:
        qs_hash = hash(frozenset(qs.values_list('pk')))
        facets['cache_key'] = str(qs_hash)
        SEARCH_CACHE_KEY = f"recipes_filter_{qs_hash}"
        if c := caches['default'].get(SEARCH_CACHE_KEY, None):
            facets['Keywords'] = c['Keywords'] or []
            facets['Foods'] = c['Foods'] or []
            facets['Books'] = c['Books'] or []
            facets['Ratings'] = c['Ratings'] or []
            facets['Recent'] = c['Recent'] or []
        else:
            facets['Keywords'] = []
            facets['Foods'] = []
            facets['Books'] = []
            rating_qs = qs.annotate(rating=Round(Avg(Case(
                When(cooklog__created_by=request.user, then='cooklog__rating'),
                default=Value(0)))))
            facets['Ratings'] = dict(Counter(r.rating for r in rating_qs))
            facets['Recent'] = ViewLog.objects.filter(
                created_by=request.user, space=request.space,
                created_at__gte=timezone.now() - timedelta(days=14)  # TODO make days of recent recipe a setting
            ).values_list('recipe__pk', flat=True)

        cached_search = {
            'recipe_list': list(qs.values_list('id', flat=True)),
            'keyword_list': request.query_params.getlist('keywords', []),
            'food_list': request.query_params.getlist('foods', []),
            'book_list': request.query_params.getlist('book', []),
            'search_keywords_or': str2bool(request.query_params.get('keywords_or', True)),
            'search_foods_or': str2bool(request.query_params.get('foods_or', True)),
            'search_books_or': str2bool(request.query_params.get('books_or', True)),
            'space': request.space,
            'Ratings': facets['Ratings'],
            'Recent': facets['Recent'],
            'Keywords': facets['Keywords'],
            'Foods': facets['Foods'],
            'Books': facets['Books'],
        }
        caches['default'].set(SEARCH_CACHE_KEY, cached_search, cache_timeout)

    return facets
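# Hedged sketch of the rating facet above: Avg(Case(...)) averages only the
# requesting user's cooklog ratings (other users' rows fall through to the 0
# default), and Round snaps the result to a whole star. Round is assumed to be
# django.db.models.functions.Round (Django 2.2+); the `cooklog` relation and
# its fields follow the function above and are assumptions about the schema.
from django.db.models import Avg, Case, Value, When
from django.db.models.functions import Round

def with_user_rating(recipes, user):
    return recipes.annotate(rating=Round(Avg(Case(
        When(cooklog__created_by=user, then='cooklog__rating'),
        default=Value(0)))))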
def backfill_has_matches(apps, schema_editor):
    YaraResult = apps.get_model('yara', 'YaraResult')
    YaraResult.objects.filter(has_matches=None).update(has_matches=Case(
        When(matches='[]', then=Value(False)),
        default=Value(True)))
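# Hedged sketch of the backfill pattern above: a Case inside update() lets a
# single UPDATE statement derive a boolean from another column instead of
# looping over rows in Python. The model and field names mirror the migration
# helper above.
from django.db.models import Case, Value, When

def backfill(model):
    model.objects.filter(has_matches=None).update(
        has_matches=Case(
            When(matches='[]', then=Value(False)),  # empty JSON list -> no matches
            default=Value(True),
        ))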
def product_type_counts(request):
    form = ProductTypeCountsForm()
    opened_in_period_list = []
    oip = None
    cip = None
    aip = None
    all_current_in_pt = None
    top_ten = None
    pt = None
    today = datetime.now(tz=localtz)
    first_of_month = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    mid_month = first_of_month.replace(day=15, hour=23, minute=59, second=59,
                                       microsecond=999999)
    end_of_month = mid_month.replace(day=monthrange(today.year, today.month)[1],
                                     hour=23, minute=59, second=59,
                                     microsecond=999999)
    start_date = first_of_month
    end_date = end_of_month

    if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET \
            and 'product_type' in request.GET:
        form = ProductTypeCountsForm(request.GET)
        if form.is_valid():
            pt = form.cleaned_data['product_type']
            month = int(form.cleaned_data['month'])
            year = int(form.cleaned_data['year'])
            first_of_month = first_of_month.replace(month=month, year=year)

            month_requested = datetime(year, month, 1)

            end_of_month = month_requested.replace(
                day=monthrange(month_requested.year, month_requested.month)[1],
                hour=23, minute=59, second=59, microsecond=999999)

            start_date = first_of_month
            end_date = end_of_month

            oip = opened_in_period(start_date, end_date, pt)

            # trending data - 12 months
            for x in range(12, 0, -1):
                opened_in_period_list.append(
                    opened_in_period(start_date + relativedelta(months=-x),
                                     end_of_month + relativedelta(months=-x), pt))

            opened_in_period_list.append(oip)

            closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium', 'Low')).values(
                'numerical_severity').annotate(
                Count('numerical_severity')).order_by('numerical_severity')

            total_closed_in_period = Finding.objects.filter(
                mitigated__range=[start_date, end_date],
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
                total=Sum(
                    Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                              then=Value(1)),
                         output_field=IntegerField())))['total']

            overall_in_pt = Finding.objects.filter(
                date__lt=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium', 'Low')).values(
                'numerical_severity').annotate(
                Count('numerical_severity')).order_by('numerical_severity')

            total_overall_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
                total=Sum(
                    Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                              then=Value(1)),
                         output_field=IntegerField())))['total']

            all_current_in_pt = Finding.objects.filter(
                date__lte=end_date,
                verified=True,
                false_p=False,
                duplicate=False,
                out_of_scope=False,
                mitigated__isnull=True,
                test__engagement__product__prod_type=pt,
                severity__in=('Critical', 'High', 'Medium', 'Low')).prefetch_related(
                'test__engagement__product',
                'test__engagement__product__prod_type',
                'test__engagement__risk_acceptance',
                'reporter').order_by('numerical_severity')

            top_ten = Product.objects.filter(
                engagement__test__finding__date__lte=end_date,
                engagement__test__finding__verified=True,
                engagement__test__finding__false_p=False,
                engagement__test__finding__duplicate=False,
                engagement__test__finding__out_of_scope=False,
                engagement__test__finding__mitigated__isnull=True,
                engagement__test__finding__severity__in=(
                    'Critical', 'High', 'Medium', 'Low'),
                prod_type=pt).annotate(
                critical=Sum(
                    Case(When(engagement__test__finding__severity='Critical',
                              then=Value(1)),
                         output_field=IntegerField())),
                high=Sum(
                    Case(When(engagement__test__finding__severity='High',
                              then=Value(1)),
                         output_field=IntegerField())),
                medium=Sum(
                    Case(When(engagement__test__finding__severity='Medium',
                              then=Value(1)),
                         output_field=IntegerField())),
                low=Sum(
                    Case(When(engagement__test__finding__severity='Low',
                              then=Value(1)),
                         output_field=IntegerField())),
                total=Sum(
                    Case(When(engagement__test__finding__severity__in=(
                        'Critical', 'High', 'Medium', 'Low'), then=Value(1)),
                         output_field=IntegerField()))
            ).order_by('-critical', '-high', '-medium', '-low')[:10]

            cip = {'S0': 0, 'S1': 0, 'S2': 0, 'S3': 0,
                   'Total': total_closed_in_period}

            aip = {'S0': 0, 'S1': 0, 'S2': 0, 'S3': 0,
                   'Total': total_overall_in_pt}

            for o in closed_in_period:
                cip[o['numerical_severity']] = o['numerical_severity__count']

            for o in overall_in_pt:
                aip[o['numerical_severity']] = o['numerical_severity__count']
        else:
            messages.add_message(request, messages.ERROR,
                                 "Please choose a month, a year, and a Product Type.",
                                 extra_tags='alert-danger')

    add_breadcrumb(title="Bi-Weekly Metrics", top_level=True, request=request)

    return render(request, 'dojo/pt_counts.html', {
        'form': form,
        'start_date': start_date,
        'end_date': end_date,
        'opened_in_period': oip,
        'trending_opened': opened_in_period_list,
        'closed_in_period': cip,
        'overall_in_pt': aip,
        'all_current_in_pt': all_current_in_pt,
        'top_ten': top_ten,
        'pt': pt,
    })
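# Hedged sketch of the "top ten" ranking above: one conditional Sum per
# severity, then a multi-key descending order_by and a slice. The relation
# path is shortened to a direct `finding` reverse relation for illustration;
# it is not the view's actual engagement__test__finding path.
from django.db.models import Case, IntegerField, Sum, Value, When

SEVERITIES = ('Critical', 'High', 'Medium', 'Low')

def top_products(products, limit=10):
    counters = {
        sev.lower(): Sum(Case(When(finding__severity=sev, then=Value(1)),
                              output_field=IntegerField()))
        for sev in SEVERITIES
    }
    # Highest-critical products first, ties broken by the lower severities.
    return products.annotate(**counters).order_by(
        '-critical', '-high', '-medium', '-low')[:limit]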
def homepage(request, template_name="homepage.html"):
    categories = []
    for category in Category.objects.annotate(project_count=Count(
            Case(When(project__is_published=True, then=1)))):
        element = {
            "title": category.title,
            "description": category.description,
            "count": category.project_count,
            "slug": category.slug,
            "title_plural": category.title_plural,
        }
        categories.append(element)

    # get up to 5 random packages
    package_count = Project.objects.count()
    random_packages = []
    if package_count > 1:
        # Get a sample of up to 5 random keys from the range 1..package_count
        package_ids = sample(
            list(range(1, package_count + 1)),
            min(package_count, 5)
        )
        # Get the random packages
        random_packages = Project.objects.filter(pk__in=package_ids)[:5]

    try:
        potw = Dpotw.objects.latest().package
    except Dpotw.DoesNotExist:
        potw = None
    except Project.DoesNotExist:
        potw = None

    try:
        gotw = Gotw.objects.latest().grid
    except Gotw.DoesNotExist:
        gotw = None
    except Grid.DoesNotExist:
        gotw = None

    # Public Service Announcement on homepage
    try:
        psa_body = PSA.objects.latest().body_text
    except PSA.DoesNotExist:
        psa_body = '<p>There are currently no announcements. To request a PSA, tweet at <a href="http://twitter.com/open_comparison">@Open_Comparison</a>.</p>'

    # Latest Django Packages blog post on homepage
    feed_result = get_feed()
    if len(feed_result.entries):
        blogpost_title = feed_result.entries[0].title
        blogpost_body = feed_result.entries[0].summary
    else:
        blogpost_title = ''
        blogpost_body = ''

    return render(
        request,
        template_name,
        {
            "latest_packages": Project.objects.published().order_by('-publication_time')[:7],
            "random_packages": random_packages,
            "potw": potw,
            "gotw": gotw,
            "psa_body": psa_body,
            "blogpost_title": blogpost_title,
            "blogpost_body": blogpost_body,
            "categories": categories,
            "package_count": package_count,
            "py3_compat": Project.objects.filter(version__supports_python3=True)
                .select_related().distinct().count(),
            "open_source_count": Project.objects.exclude(repo_url__isnull=True)
                .exclude(repo_url="").count(),
            "latest_python3": Version.objects.filter(supports_python3=True)
                .select_related("package").distinct().order_by("-created")[0:5],
            "drafts_count": Project.objects.filter(is_published=False).count(),
            "awaiting_projects_count": Project.objects.filter(is_awaiting_approval=True).count(),
            "published_projects_count": Project.objects.filter(is_published=True).count(),
        })
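# Hedged sketch of the category counts above: Count(Case(...)) counts only the
# published projects of each category, because the Case yields NULL (which
# COUNT skips) for unpublished ones. On Django 2.0+ the equivalent is
# Count('project', filter=Q(project__is_published=True)). The `project`
# relation and `is_published` flag follow the view above.
from django.db.models import Case, Count, When

def categories_with_published_counts(categories):
    return categories.annotate(project_count=Count(
        Case(When(project__is_published=True, then=1))))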