def _compute_orders(self, quotas, q_items, q_vars, size_left):
    """Subtract paid/pending order positions from each quota's remaining size.

    Side effects: increments ``self.count_paid_orders`` /
    ``count_pending_orders`` / ``count_exited_orders`` per quota, caches the
    paid count on the quota object, mutates ``size_left`` in place, and
    records a final availability in ``self.results`` for quotas that run
    out here (GONE if exhausted by paid orders, ORDERED otherwise).

    :param quotas: iterable of quota objects to compute
    :param q_items: rows linking quotas to items -- presumably dicts with
        ``item_id`` / ``quota_id`` keys (TODO confirm at caller)
    :param q_vars: rows linking quotas to variations -- presumably dicts
        with ``itemvariation_id`` / ``quota_id`` keys
    :param size_left: mapping quota -> capacity still available, mutated
    """
    events = {q.event_id for q in quotas}
    subevents = {q.subevent_id for q in quotas}
    # Positions must belong to one of the requested subevents; quotas
    # without a subevent (None) also match positions without one.
    seq = Q(subevent_id__in=subevents)
    if None in subevents:
        seq |= Q(subevent__isnull=True)
    # All paid/pending positions that can count towards any requested quota,
    # matched either via their item (when they have no variation) or via
    # their variation.
    op_lookup = OrderPosition.objects.filter(
        order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING],
        order__event_id__in=events,
    ).filter(seq).filter(
        Q(
            Q(variation_id__isnull=True) &
            Q(item_id__in={i['item_id'] for i in q_items if self._quota_objects[i['quota_id']] in quotas})
        ) | Q(
            variation_id__in={i['itemvariation_id'] for i in q_vars if self._quota_objects[i['quota_id']] in quotas})
    ).order_by()
    if any(q.release_after_exit for q in quotas):
        # Annotate each position with its last entry and last exit check-in
        # (only lists that forbid re-entry after exit are considered), then
        # flag it as exited when the last exit is after the last entry.
        op_lookup = op_lookup.annotate(
            last_entry=Subquery(
                Checkin.objects.filter(
                    position_id=OuterRef('pk'),
                    list__allow_entry_after_exit=False,
                    type=Checkin.TYPE_ENTRY,
                ).order_by().values('position_id').annotate(
                    m=Max('datetime')
                ).values('m')
            ),
            last_exit=Subquery(
                Checkin.objects.filter(
                    position_id=OuterRef('pk'),
                    list__allow_entry_after_exit=False,
                    type=Checkin.TYPE_EXIT,
                ).order_by().values('position_id').annotate(
                    m=Max('datetime')
                ).values('m')
            ),
        ).annotate(
            is_exited=Case(
                When(
                    Q(last_entry__isnull=False) & Q(last_exit__isnull=False) & Q(last_exit__gt=F('last_entry')),
                    then=Value(1, output_field=models.IntegerField()),
                ),
                default=Value(0, output_field=models.IntegerField()),
                output_field=models.IntegerField(),
            ),
        )
    else:
        # No quota releases capacity on exit: skip the check-in subqueries.
        op_lookup = op_lookup.annotate(
            is_exited=Value(0, output_field=models.IntegerField())
        )
    # Collapse to one counted row per (status, item, subevent, variation, exited).
    op_lookup = op_lookup.values('order__status', 'item_id', 'subevent_id', 'variation_id', 'is_exited').annotate(c=Count('*'))
    # Process exited rows first and, within that, paid ('p') before
    # pending ('n') so that the availability recorded when a quota runs
    # out reflects the "strongest" status.
    for line in sorted(op_lookup, key=lambda li: (int(li['is_exited']), li['order__status']), reverse=True):  # p before n, exited before non-exited
        if line['variation_id']:
            qs = self._var_to_quotas[line['variation_id']]
        else:
            qs = self._item_to_quotas[line['item_id']]
        for q in qs:
            if q.subevent_id == line['subevent_id']:
                if line['order__status'] == Order.STATUS_PAID:
                    self.count_paid_orders[q] += line['c']
                    q.cached_availability_paid_orders = self.count_paid_orders[q]
                elif line['order__status'] == Order.STATUS_PENDING:
                    self.count_pending_orders[q] += line['c']
                if q.release_after_exit and line['is_exited']:
                    # Exited positions do not consume capacity on quotas
                    # that release after exit.
                    self.count_exited_orders[q] += line['c']
                else:
                    size_left[q] -= line['c']
                    if size_left[q] <= 0 and q not in self.results:
                        if line['order__status'] == Order.STATUS_PAID:
                            self.results[q] = Quota.AVAILABILITY_GONE, 0
                        else:
                            self.results[q] = Quota.AVAILABILITY_ORDERED, 0
def get_context_data(self, **kwargs):
    """Build the template context for a work's detail page.

    Adds: genres string, per-user suggestion form and rating (when
    authenticated), known reference links, rating statistics with French
    labels, and upcoming events with attendee counts.

    NOTE(review): ``is_authenticated`` is called as a method, which implies
    Django < 1.10 here -- confirm before upgrading.  Also note this mutates
    ``self.object.source`` (keeps only the first comma-separated part).
    """
    context = super().get_context_data(**kwargs)
    update_poster_if_nsfw(self.object, self.request.user)
    # Keep only the first source when several are comma-separated.
    self.object.source = self.object.source.split(',')[0]
    context['genres'] = ', '.join(genre.title for genre in self.object.genre.all())
    if self.request.user.is_authenticated():
        context['suggestion_form'] = SuggestionForm(
            instance=Suggestion(user=self.request.user, work=self.object))
        try:
            context['rating'] = self.object.rating_set.get(
                user=self.request.user).choice
        except Rating.DoesNotExist:
            # No rating yet for this user -- simply omit the key.
            pass
    # Only keep reference URLs whose domain is in the known whitelist.
    context['references'] = []
    for reference in self.object.reference_set.all():
        for domain, name in REFERENCE_DOMAINS:
            if reference.url.startswith(domain):
                context['references'].append((reference.url, name))
    # Rating histogram for this work, keyed by choice value.
    nb = Counter(
        Rating.objects.filter(work=self.object).values_list('choice', flat=True))
    labels = OrderedDict([
        ('favorite', 'Ajoutés aux favoris'),
        ('like', 'Ont aimé'),
        ('neutral', 'Neutre'),
        ('dislike', 'N\'ont pas aimé'),
        ('willsee', 'Ont envie de voir'),
        ('wontsee', 'N\'ont pas envie de voir'),
    ])
    # Choices that mean the user has actually seen the work.
    seen_ratings = {'favorite', 'like', 'neutral', 'dislike'}
    total = sum(nb.values())
    if total > 0:
        context['stats'] = []
        seen_total = sum(nb[rating] for rating in seen_ratings)
        for rating, label in labels.items():
            # As soon as someone has seen the work, hide the
            # "want/don't want to see" rows from the stats.
            if seen_total > 0 and rating not in seen_ratings:
                continue
            context['stats'].append({
                'value': nb[rating],
                'colors': RATING_COLORS[rating],
                'label': label
            })
        context['seen_percent'] = round(100 * seen_total / float(total))
    # Upcoming events with the number of confirmed attendees.
    events = self.object.event_set\
        .filter(date__gte=timezone.now())\
        .annotate(nb_attendees=Sum(Case(
            When(attendee__attending=True, then=Value(1)),
            default=Value(0),
            output_field=IntegerField(),
        )))
    if len(events) > 0:
        # Map event_id -> attending flag for the current user, if any.
        my_events = {}
        if self.request.user.is_authenticated():
            my_events = dict(
                self.request.user.attendee_set.filter(
                    event__in=events).values_list('event_id', 'attending'))
        context['events'] = [{
            'id': event.id,
            'attending': my_events.get(event.id, None),
            'type': event.get_event_type_display(),
            'channel': event.channel,
            'date': event.get_date(),
            'link': event.link,
            'location': event.location,
            'nb_attendees': event.nb_attendees,
        } for event in events]
    return context
def list_assets_view(request):
    """List assets with sorting, field filters, free-text search and pagination.

    GET parameters:
      * ``sort``   -- comma-separated sort fields, validated against a whitelist
      * ``filter`` -- space-separated criteria; ``field:value`` pairs for
        whitelisted fields, everything else is free text matched against
        value/name/description
      * ``n``      -- page size (default 16)
      * ``page``   -- page number
    """
    # Check sorting options against the whitelist (ascending and descending).
    allowed_sort_options = ["id", "name", "criticity_num", "score", "type",
                            "updated_at", "risk_level", "risk_level__grade",
                            "-id", "-name", "-criticity_num", "-score", "-type",
                            "-updated_at", "-risk_level", "-risk_level__grade"]
    sort_options = request.GET.get("sort", "-updated_at")
    sort_options_valid = []
    for s in sort_options.split(","):
        if s in allowed_sort_options and s not in sort_options_valid:
            sort_options_valid.append(str(s))

    # Check filtering options
    filter_options = request.GET.get("filter", "")
    # Todo: filter on fields
    allowed_filter_fields = ["id", "name", "criticity", "type", "score"]
    filter_criterias = filter_options.split(" ")
    filter_fields = {}
    free_text_terms = []
    for criteria in filter_criterias:
        field = criteria.split(":")
        if len(field) > 1 and field[0] in allowed_filter_fields:
            # allowed field
            if field[0] == "score":
                # "score" is exposed in the UI but stored as risk_level.grade.
                filter_fields.update({"risk_level__grade": field[1]})
            else:
                filter_fields.update({str(field[0]): field[1]})
        else:
            free_text_terms.append(criteria.strip())
    # FIX: terms used to be concatenated with no separator, so a multi-word
    # search like "foo bar" became "foobar" and never matched. Join with
    # spaces instead, dropping empty fragments.
    filter_opts = " ".join(t for t in free_text_terms if t)

    # Query
    assets_list = Asset.objects.filter(**filter_fields).filter(
        Q(value__icontains=filter_opts) |
        Q(name__icontains=filter_opts) |
        Q(description__icontains=filter_opts)
    ).annotate(
        # Sortable severity rank; unknown criticity values rank as high ("1").
        criticity_num=Case(
            When(criticity="high", then=Value("1")),
            When(criticity="medium", then=Value("2")),
            When(criticity="low", then=Value("3")),
            default=Value("1"),
            output_field=CharField())
    ).annotate(cat_list=ArrayAgg('categories__value')).order_by(*sort_options_valid)

    # Pagination assets.
    # FIX: guard against a non-numeric 'n' parameter instead of crashing.
    try:
        nb_rows = int(request.GET.get('n', 16))
    except (TypeError, ValueError):
        nb_rows = 16
    assets_paginator = Paginator(assets_list, nb_rows)
    page = request.GET.get('page')
    try:
        assets = assets_paginator.page(page)
    except PageNotAnInteger:
        assets = assets_paginator.page(1)
    except EmptyPage:
        assets = assets_paginator.page(assets_paginator.num_pages)

    # List asset groups
    asset_groups = []
    for asset_group in AssetGroup.objects.all():
        ag = model_to_dict(asset_group)
        # extract asset names to display
        asset_list = []
        for asset in asset_group.assets.all():
            asset_list.append(asset.value)
        ag["assets_names"] = ", ".join(asset_list)
        ag["risk_grade"] = asset_group.get_risk_grade()
        asset_groups.append(ag)

    return render(
        request, 'list-assets.html',
        {'assets': assets, 'asset_groups': asset_groups})
def annotate_with_numbers(qs, event):
    """
    Modifies a queryset of checkin lists by annotating it with the number of
    order positions and checkins associated with it.

    Adds three annotations per list: ``checkin_count`` (check-ins, counting
    pending orders only when the list's ``include_pending`` is set),
    ``position_count`` (eligible order positions, via one of four subqueries
    depending on ``all_products`` / ``include_pending``), and ``percent``
    (integer checkin/position ratio, 0 when there are no positions).
    """
    # Import here to prevent circular import
    from . import Order, OrderPosition, Item

    # This is the mother of all subqueries. Sorry. I try to explain it, at least?
    # First, we prepare a subquery that for every check-in that belongs to a paid-order
    # position and to the list in question. Then, we check that it also belongs to the
    # correct subevent (just to be sure) and aggregate over lists (so, over everything,
    # since we filtered by lists).
    cqs_paid = Checkin.objects.filter(
        position__order__event=event,
        position__order__status=Order.STATUS_PAID,
        list=OuterRef('pk')
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(position__subevent=OuterRef('subevent')) | (Q(position__subevent__isnull=True))
    ).order_by().values('list').annotate(c=Count('*')).values('c')

    # Same as above, but additionally counting check-ins of pending orders.
    cqs_paid_and_pending = Checkin.objects.filter(
        position__order__event=event,
        position__order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING],
        list=OuterRef('pk')
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(position__subevent=OuterRef('subevent')) | (Q(position__subevent__isnull=True))
    ).order_by().values('list').annotate(c=Count('*')).values('c')

    # Now for the hard part: getting all order positions that contribute to this list. This
    # requires us to use TWO subqueries. The first one, pqs_all, will only be used for check-in
    # lists that contain all the products of the event. This is the simpler one, it basically
    # looks like the check-in counter above.
    pqs_all_paid = OrderPosition.objects.filter(
        order__event=event,
        order__status=Order.STATUS_PAID,
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(subevent=OuterRef('subevent')) | (Q(subevent__isnull=True))
    ).order_by().values('order__event').annotate(c=Count('*')).values('c')

    pqs_all_paid_and_pending = OrderPosition.objects.filter(
        order__event=event,
        order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING]
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(subevent=OuterRef('subevent')) | (Q(subevent__isnull=True))
    ).order_by().values('order__event').annotate(c=Count('*')).values('c')

    # Now we need a subquery for the case of checkin lists that are limited to certain
    # products. We cannot use OuterRef("limit_products") since that would do a cross-product
    # with the products table and we'd get duplicate rows in the output with different annotations
    # on them, which isn't useful at all. Therefore, we need to add a second layer of subqueries
    # to retrieve all of those items and then check if the item_id is IN this subquery result.
    pqs_limited_paid = OrderPosition.objects.filter(
        order__event=event,
        order__status=Order.STATUS_PAID,
        item_id__in=Subquery(
            Item.objects.filter(
                checkinlist__pk=OuterRef(OuterRef('pk'))).values('pk'))
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(subevent=OuterRef('subevent')) | (Q(subevent__isnull=True))
    ).order_by().values('order__event').annotate(c=Count('*')).values('c')

    pqs_limited_paid_and_pending = OrderPosition.objects.filter(
        order__event=event,
        order__status__in=[Order.STATUS_PAID, Order.STATUS_PENDING],
        item_id__in=Subquery(
            Item.objects.filter(
                checkinlist__pk=OuterRef(OuterRef('pk'))).values('pk'))
    ).filter(
        # This assumes that in an event with subevents, *all* positions have subevents
        # and *all* checkin lists have a subevent assigned
        Q(subevent=OuterRef('subevent')) | (Q(subevent__isnull=True))
    ).order_by().values('order__event').annotate(c=Count('*')).values('c')

    # Finally, we put all of this together. We force empty subquery aggregates to 0 by using Coalesce()
    # and decide which subquery to use for this row. In the end, we compute an integer percentage in case
    # we want to display a progress bar.
    return qs.annotate(
        checkin_count=Coalesce(
            Case(When(include_pending=True,
                      then=Subquery(cqs_paid_and_pending, output_field=models.IntegerField())),
                 default=Subquery(cqs_paid, output_field=models.IntegerField()),
                 output_field=models.IntegerField()),
            0),
        position_count=Coalesce(
            Case(When(all_products=True, include_pending=False,
                      then=Subquery(pqs_all_paid, output_field=models.IntegerField())),
                 When(all_products=True, include_pending=True,
                      then=Subquery(pqs_all_paid_and_pending, output_field=models.IntegerField())),
                 When(all_products=False, include_pending=False,
                      then=Subquery(pqs_limited_paid, output_field=models.IntegerField())),
                 default=Subquery(pqs_limited_paid_and_pending, output_field=models.IntegerField()),
                 output_field=models.IntegerField()),
            0)).annotate(
        percent=Case(When(position_count__gt=0,
                          then=F('checkin_count') * 100 / F('position_count')),
                     default=0,
                     output_field=models.IntegerField()))
def get(self, request, *args, **kwargs):
    """Return aggregated statistics about the winery's events.

    Optional ``from_date`` / ``to_date`` query params (``YYYY-MM-DD``)
    restrict both reservations and ratings; a malformed date yields a 400.
    The response bundles several aggregate querysets: reservations per
    event/month, attendee languages/countries/age groups, top-rated
    events and top-earning events.
    """
    DATE_FORMAT = "%Y-%m-%d"
    user_events = request.user.winery.events.all()
    user_events_reservations = Reservation.objects.filter(
        event_occurrence__event__in=user_events)
    user_events_ratings = Rate.objects.filter(event__in=user_events)
    from_date = request.query_params.get('from_date')
    to_date = request.query_params.get('to_date')
    try:
        if from_date:
            from_date = datetime.strptime(from_date, DATE_FORMAT)
            user_events_reservations = user_events_reservations.filter(
                event_occurrence__start__gte=from_date)
            user_events_ratings = user_events_ratings.filter(
                created__gte=from_date)
        if to_date:
            to_date = datetime.strptime(to_date, DATE_FORMAT)
            user_events_reservations = user_events_reservations.filter(
                event_occurrence__end__lte=to_date)
            user_events_ratings = user_events_ratings.filter(
                created__lte=to_date)
    except (ValueError, TypeError):
        return Response({"errors": "Dates format must be 'YYYY-MM-DD'"},
                        status=status.HTTP_400_BAD_REQUEST)
    # Birth-year cutoffs for the three age buckets (18-35, 36-50, >50).
    today = date.today()
    age_18_birth_year = (today - relativedelta(years=18)).year
    age_35_birth_year = (today - relativedelta(years=35)).year
    age_50_birth_year = (today - relativedelta(years=50)).year
    response = {
        "reservations_by_event":
            (user_events_reservations.values("event_occurrence__event__name").
             annotate(name=F("event_occurrence__event__name")).annotate(
                 count=Count("id")).values("name", "count").order_by("count")[:10]),
        "reservations_by_month":
            (user_events_reservations.annotate(month=ExtractMonth(
                "event_occurrence__start")).values("month").annotate(
                count=Count("id")).values("month", "count").order_by("month")),
        "attendees_languages":
            (user_events_reservations.values("user__language__name").annotate(
                language=F("user__language__name")).annotate(
                count=Count("id")).values("language", "count")),
        "attendees_countries":
            (user_events_reservations.values("user__country__name").annotate(
                country=F("user__country__name")).annotate(
                count=Count("id")).values("country", "count")),
        # Each reservation is flagged 0/1 per age bucket via its user's
        # birth year, then the flags are summed into a single aggregate.
        "attendees_age_groups":
            (user_events_reservations.annotate(young=Case(
                When(user__birth_date__year__range=(age_35_birth_year,
                                                    age_18_birth_year), then=1),
                default=0,
                output_field=IntegerField())).annotate(
                midage=Case(When(user__birth_date__year__range=(
                    age_50_birth_year, age_35_birth_year - 1), then=1),
                    default=0,
                    output_field=IntegerField())).annotate(
                old=Case(When(
                    user__birth_date__year__lt=age_50_birth_year, then=1),
                    default=0,
                    output_field=IntegerField())).aggregate(
                young_sum=Sum('young'),
                midage_sum=Sum('midage'),
                old_sum=Sum('old'))),
        "events_by_rating":
            (user_events_ratings.values("event__name").annotate(
                avg_rating=Coalesce(Avg("rate"), 0)).annotate(
                name=F("event__name")).values(
                "name", "avg_rating").order_by("-avg_rating")[:10]),
        "reservations_by_earnings":
            (user_events_reservations.values(
                "event_occurrence__event__name").annotate(
                earnings=Sum("paid_amount")).annotate(
                name=F("event_occurrence__event__name")).values(
                "name", "earnings").order_by("-earnings")[:10])
    }
    # Awful hack to return months with count = 0
    # To achieve this with SQL, a Month table is needed :(
    zero_count_months = [{"month": i, "count": 0} for i in range(1, 13)]
    reservations = response["reservations_by_month"]
    for elem in reservations:
        zero_count_months[elem['month'] - 1].update(elem)
    response['reservations_by_month'] = zero_count_months
    # Reshape the aggregate dict {young_sum: x, ...} into a list of
    # {"group": "young", "count": x} entries for the frontend.
    response['attendees_age_groups'] = [{
        "group": k.split("_")[0],
        "count": v
    } for k, v in response['attendees_age_groups'].items()]
    return Response(response)
def get_list(self, params, only_count=None):
    """
    Postal-code (zip) address search.

    Builds display-ready road-name and lot-number (jibun) address strings
    from the raw address fields and filters them by the query words.
    Returns ``limit + 1`` rows (one extra so the caller can detect a next
    page), or just the match count when ``only_count`` is truthy.

    :param params: request-like object; reads query, cursor, limit from
        ``params.data``
    :param only_count: when truthy, return only the number of matches
    :return: sliced queryset of annotated rows -- or an int count; note the
        empty-query path returns the tuple ``([], 0, None)`` instead
    """
    query = params.data.get('query')
    limit = int(params.data.get('limit', 100) or 100)
    cursor = params.data.get('cursor', 1)
    cursor = int(cursor or 1)
    # Cursor is 1-based: page N starts at (N - 1) * limit.
    offset = (cursor - 1) * limit

    # query parameter parsing
    # Supported query shapes (examples kept verbatim below):
    # road name + building number, neighborhood + lot number, building name.
    """
    • 도로명 + 건물번호 (예 : 테헤란로 501)
    • 동/읍/면/리 + 번지 (예: 삼성동 157-27)
    • 건물명, 아파트명 (예: 반포자이아파트)
    """
    words = query.split() if query else []
    if len(words) > 0:
        conditions = Q()
        for word in words:
            if '-' in word:
                # Lot numbers like "157-27" are matched part by part.
                ns = word.split('-')
                for no in ns:
                    conditions &= self._set_filter(no)
            else:
                conditions &= self._set_filter(word)
    else:
        # No usable query words: nothing to search.
        return [], 0, None

    # make query
    query_set = Yoyakinfo.objects.all()
    query_set = query_set.annotate(
        zip=F('gichoguyeok_num'),  # new-style postal code
        # Road-name address: "sido sigungu [eupmyendong] doromyeong
        # [basement/elevated marker] bonbeon[-bubeon]".
        doromyeong_addr=Concat('sido_name', Value(' '), 'sigungu_name',
                               Case(When(eupmyendong_gubun='0',
                                         then=F('beopjeongeupmyendong_name')),
                                    default=Value('')),
                               Value(' '), 'doromyeong', Value(' '),
                               # '지하' = basement, '공중' = elevated --
                               # runtime literals, kept verbatim.
                               Case(When(is_basement='0', then=Value('')),
                                    When(is_basement='1', then=Value('지하')),
                                    When(is_basement='2', then=Value('공중')),
                                    default=Value('')),
                               'building_bonbeon',
                               # Secondary building number only when nonzero.
                               Case(When(building_bubeon=0, then=None),
                                    default=Concat(Value('-'), F('building_bubeon'),
                                                   output_field=CharField())),
                               output_field=CharField()),
        # Extra info in parentheses: neighborhood and/or building name,
        # depending on the eupmyendong / apartment-house flags.
        etc=Case(When(eupmyendong_gubun='0', is_apartment_house='0',
                      then=Value('')),
                 When(eupmyendong_gubun='0', is_apartment_house='1',
                      then=Concat(Value('('), F('sigunguyong_building_name'),
                                  Value(')'))),
                 When(eupmyendong_gubun='1', is_apartment_house='0',
                      then=Concat(Value('('), F('beopjeongeupmyendong_name'),
                                  Value(')'))),
                 When(eupmyendong_gubun='1', is_apartment_house='1',
                      then=Concat(Value('('), F('beopjeongeupmyendong_name'),
                                  Value(', '), F('sigunguyong_building_name'),
                                  Value(')')))),
        # Jibun (lot-number) address: "sido sigungu [eupmyendong] [ri]
        # beonji[-ho] building name".
        jibun_addr=Concat('sido_name', Value(' '), 'sigungu_name', Value(' '),
                          Case(When(eupmyendong_gubun='0', then=Value('')),
                               When(eupmyendong_gubun='1',
                                    then=Concat(F('beopjeongeupmyendong_name'),
                                                Value(' '))),
                               ),
                          Case(When(beopjeongri_name='', then=Value('')),
                               default=Concat(F('beopjeongri_name'), Value(' '))),
                          'jibeonbonbeon_beonji',
                          Case(When(jibeonbubeon_ho=0, then=Value('')),
                               default=Concat(Value('-'), F('jibeonbubeon_ho'),
                                              output_field=CharField())),
                          Value(' '), 'sigunguyong_building_name',
                          output_field=CharField())
    )
    query_set = query_set.filter(conditions)
    if only_count:
        return query_set.values('zip', 'doromyeong_addr', 'etc', 'jibun_addr').count()
    # Fetch one extra row beyond the page so the caller can detect
    # whether a next page exists.
    query_set = query_set.order_by('gichoguyeok_num', 'doromyeong', 'building_bonbeon')[offset: offset + limit + 1]
    return query_set
def detail_asset_group_view(request, assetgroup_id):
    """Render the detail page of an asset group: its assets, findings with
    per-severity / per-scope / per-engine statistics, scan definitions,
    scan statistics and the current risk grade.

    :param assetgroup_id: primary key of the AssetGroup (404 when missing)
    """
    asset_group = get_object_or_404(AssetGroup, id=assetgroup_id)
    assets = asset_group.assets.all()
    # findings = Finding.objects.filter(asset__in=assets).order_by('asset','severity', 'type', 'title')
    findings = Finding.objects.filter(asset__in=assets).annotate(
        # Sortable severity rank; unknown severities rank as high ("1").
        severity_numm=Case(When(severity="high", then=Value("1")),
                           When(severity="medium", then=Value("2")),
                           When(severity="low", then=Value("3")),
                           When(severity="info", then=Value("4")),
                           default=Value("1"),
                           output_field=CharField())).annotate(
        scope_list=ArrayAgg('scopes__name')).order_by(
        'asset', 'severity_numm', 'type', 'updated_at')

    # Pre-seed a zeroed counter per known engine-policy scope.
    asset_scopes = {}
    for scope in EnginePolicyScope.objects.all():
        asset_scopes.update({
            scope.name: {
                'priority': scope.priority,
                'id': scope.id,
                'total': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0
            }
        })

    findings_stats = {
        'total': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0,
        'new': 0, 'ack': 0
    }
    engines_stats = {}

    for finding in findings:
        findings_stats['total'] = findings_stats.get('total', 0) + 1
        findings_stats[finding.severity] = findings_stats.get(
            finding.severity, 0) + 1
        if finding.status == 'new':
            findings_stats['new'] = findings_stats.get('new', 0) + 1
        if finding.status == 'ack':
            findings_stats['ack'] = findings_stats.get('ack', 0) + 1
        # ArrayAgg may yield [None] for findings without scopes.
        for fs in finding.scope_list:
            if fs is not None:
                c = asset_scopes[fs]
                asset_scopes[fs].update({
                    'total': c['total'] + 1,
                    finding.severity: c[finding.severity] + 1
                })
        if finding.engine_type not in engines_stats:
            engines_stats.update({finding.engine_type: 0})
        engines_stats[finding.engine_type] = engines_stats.get(
            finding.engine_type, 0) + 1

    # Scans
    scan_defs = ScanDefinition.objects.filter(
        Q(assetgroups_list__in=[asset_group])).annotate(
        engine_type_name=F('engine_type__name'))
    scans = []
    for scan_def in scan_defs:
        scans = scans + list(scan_def.scan_set.all())

    scans_stats = {
        'performed': len(scans),
        'defined': len(scan_defs),
        'periodic': scan_defs.filter(scan_type='periodic').count(),
        'ondemand': scan_defs.filter(scan_type='single').count(),
        'running': scan_defs.filter(status='started').count(),  # BUG: should be grouped by assets
    }

    # calculate automatically risk grade
    # asset_group.calc_risk_grade()
    asset_group_risk_grade = {
        'now': asset_group.get_risk_grade(),
        # 'day_ago': asset_group.get_risk_grade(history = 1),
        # 'week_ago': asset_group.get_risk_grade(history = 7),
        # 'month_ago': asset_group.get_risk_grade(history = 30),
        # 'year_ago': asset_group.get_risk_grade(history = 365)
    }

    return render(
        request, 'details-asset-group.html', {
            'asset_group': asset_group,
            'asset_group_risk_grade': asset_group_risk_grade,
            'assets': assets,
            'findings': findings,
            'findings_stats': findings_stats,
            'scans_stats': scans_stats,
            'scans': scans,
            'scan_defs': scan_defs,
            'engines_stats': engines_stats,
            # FIX: Python 3 compatibility -- dict.iteritems() and the
            # tuple-parameter lambda `lambda (x, y): ...` (removed by
            # PEP 3113) do not even parse under Python 3.
            'asset_scopes': sorted(asset_scopes.items(),
                                   key=lambda item: item[1]['priority'])
        })
def calculate_category_scores_until_date_or_score(student_id_or_ids=None,
                                                  category_id_or_ids=None,
                                                  date: datetime = None,
                                                  score: Score = None,
                                                  **extra_score_filters):
    """Aggregate per-category score statistics up to a cutoff date.

    The cutoff is ``date`` if given, else the due date of ``score``'s
    assignment, else today.

    :param student_id_or_ids: int, list or QuerySet of student ids (optional)
    :param category_id_or_ids: int, list or QuerySet of category ids (optional)
    :param date: explicit cutoff date
    :param score: Score whose assignment's due date serves as cutoff
    :param extra_score_filters: extra keyword filters applied to the Score rows
    :returns: a ``values()`` queryset with one row per (student,) category,
        annotated with assignment/score counts, average percentage,
        earned/possible points, excused/missing counters and latest due date
    :raises ValueError: if neither student nor category ids are given
    :raises TypeError: if an id argument has an unsupported type
    """
    if not student_id_or_ids and not category_id_or_ids:
        raise ValueError("Need either student_ids or category_ids.")
    if not date and not score:
        end_date = timezone.now().date()
    else:
        end_date = date if date else score.assignment.due_date

    if isinstance(student_id_or_ids, int):
        student_filter = {"student_id": student_id_or_ids}
    elif isinstance(student_id_or_ids, (list, QuerySet)):
        student_filter = {"student_id__in": student_id_or_ids}
    elif student_id_or_ids is None:
        student_filter = {}
    else:
        raise TypeError("Param 'student_id' must be int or list, "
                        f"got type {type(student_id_or_ids)}")

    # Mirror the student handling (QuerySet is now accepted here too, for
    # consistency -- `__in` handles either).
    if isinstance(category_id_or_ids, int):
        category_filter = {"assignment__category_id": category_id_or_ids}
    elif isinstance(category_id_or_ids, (list, QuerySet)):
        category_filter = {"assignment__category_id__in": category_id_or_ids}
    elif category_id_or_ids is None:
        category_filter = {}
    else:
        raise TypeError("Param 'category_id' must be int or list, "
                        f"got type {type(category_id_or_ids)}")

    # Only group by student when students were requested at all.
    if student_id_or_ids is not None:
        values_list = ['student_id']
    else:
        values_list = []

    # FIX: a duplicate plain kwarg `assignment__due_date__lte=end_date` next
    # to the Q object silently excluded assignments without a due date,
    # making the `due_date__isnull=True` branch of the Q dead. Keep only
    # the Q object so undated assignments are included as intended.
    result = (Score.objects.filter(**{
        **student_filter,
        **category_filter,
        **extra_score_filters
    }).filter(Q(assignment__due_date__lte=end_date) |
              Q(assignment__due_date__isnull=True),
              assignment__is_active=True,
              is_excused=False).annotate(
        category_id=F('assignment__category_id')).prefetch_related(
        'assignment__category').values(
        *(values_list + ['category_id'])).annotate(
        assignment_count=Count('assignment_id', distinct=True),
        score_count=Count('*'),
        average=Avg('percentage'),
        points_earned=Sum('points'),
        possible_points=Sum(Case(
            When(points__isnull=True, then=None),
            default=F('assignment__possible_points')),
            output_field=FloatField()),
        # NOTE(review): rows are filtered to is_excused=False above, so
        # excused_count can only ever be 0 here -- confirm intent.
        excused_count=Sum(Case(When(is_excused=True, then=1), default=0),
                          output_field=IntegerField()),
        missing_count=Sum(Case(When(is_missing=True, then=1), default=0),
                          output_field=IntegerField()),
        latest_due_date=Max('assignment__due_date')))
    # FIX: removed a leftover `import pdb; pdb.set_trace()` debugger trap
    # that froze execution whenever the result set was empty.
    return result
def product_type_counts(request):
    """Bi-weekly metrics page for a product type.

    With valid ``month`` / ``year`` / ``product_type`` GET parameters,
    computes findings opened/closed in the selected month, a 12-month
    opened trend, the overall open-finding breakdown, the full list of
    current findings and the top-ten products by severity counts.
    Renders ``dojo/pt_counts.html`` either way.
    """
    form = ProductTypeCountsForm()
    opened_in_period_list = []
    oip = None
    cip = None
    aip = None
    all_current_in_pt = None
    top_ten = None
    pt = None
    # Default period: the current calendar month.
    today = timezone.now()
    first_of_month = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    mid_month = first_of_month.replace(day=15, hour=23, minute=59, second=59, microsecond=999999)
    end_of_month = mid_month.replace(day=monthrange(today.year, today.month)[1],
                                     hour=23, minute=59, second=59, microsecond=999999)
    start_date = first_of_month
    end_date = end_of_month
    if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET and 'product_type' in request.GET:
        form = ProductTypeCountsForm(request.GET)
        if form.is_valid():
            pt = form.cleaned_data['product_type']
            month = int(form.cleaned_data['month'])
            year = int(form.cleaned_data['year'])
            first_of_month = first_of_month.replace(month=month, year=year)
            month_requested = datetime(year, month, 1)
            end_of_month = month_requested.replace(day=monthrange(month_requested.year, month_requested.month)[1],
                                                   hour=23, minute=59, second=59, microsecond=999999)
            start_date = first_of_month
            start_date = datetime(start_date.year, start_date.month, start_date.day,
                                  tzinfo=timezone.get_current_timezone())
            end_date = end_of_month
            # NOTE(review): rebuilding end_date from (year, month, day) drops
            # the 23:59:59 time component, so the last day of the month is
            # effectively cut at midnight -- confirm whether intended.
            end_date = datetime(end_date.year, end_date.month, end_date.day,
                                tzinfo=timezone.get_current_timezone())
            oip = opened_in_period(start_date, end_date, pt)
            # trending data - 12 months
            for x in range(12, 0, -1):
                opened_in_period_list.append(
                    opened_in_period(start_date + relativedelta(months=-x),
                                     end_of_month + relativedelta(months=-x), pt))
            opened_in_period_list.append(oip)
            # Findings mitigated within the period, grouped by severity rank.
            closed_in_period = Finding.objects.filter(mitigated__range=[start_date, end_date],
                                                      test__engagement__product__prod_type=pt,
                                                      severity__in=('Critical', 'High', 'Medium', 'Low')).values(
                'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity')
            total_closed_in_period = Finding.objects.filter(mitigated__range=[start_date, end_date],
                                                            test__engagement__product__prod_type=pt,
                                                            severity__in=(
                                                                'Critical', 'High', 'Medium', 'Low')).aggregate(
                total=Sum(
                    Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                              then=Value(1)),
                         output_field=IntegerField())))['total']
            # Open, verified findings up to the period end, by severity rank.
            # NOTE(review): this one uses date__lt while the totals below use
            # date__lte -- confirm whether the off-by-one-day is intended.
            overall_in_pt = Finding.objects.filter(date__lt=end_date,
                                                   verified=True,
                                                   false_p=False,
                                                   duplicate=False,
                                                   out_of_scope=False,
                                                   mitigated__isnull=True,
                                                   test__engagement__product__prod_type=pt,
                                                   severity__in=('Critical', 'High', 'Medium', 'Low')).values(
                'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity')
            total_overall_in_pt = Finding.objects.filter(date__lte=end_date,
                                                         verified=True,
                                                         false_p=False,
                                                         duplicate=False,
                                                         out_of_scope=False,
                                                         mitigated__isnull=True,
                                                         test__engagement__product__prod_type=pt,
                                                         severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
                total=Sum(
                    Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                              then=Value(1)),
                         output_field=IntegerField())))['total']
            # Full list of currently-open findings for display.
            all_current_in_pt = Finding.objects.filter(date__lte=end_date,
                                                       verified=True,
                                                       false_p=False,
                                                       duplicate=False,
                                                       out_of_scope=False,
                                                       mitigated__isnull=True,
                                                       test__engagement__product__prod_type=pt,
                                                       severity__in=(
                                                           'Critical', 'High', 'Medium', 'Low')).prefetch_related(
                'test__engagement__product',
                'test__engagement__product__prod_type',
                'test__engagement__risk_acceptance',
                'reporter').order_by('numerical_severity')
            # Top ten products by per-severity open-finding counts.
            top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date,
                                             engagement__test__finding__verified=True,
                                             engagement__test__finding__false_p=False,
                                             engagement__test__finding__duplicate=False,
                                             engagement__test__finding__out_of_scope=False,
                                             engagement__test__finding__mitigated__isnull=True,
                                             engagement__test__finding__severity__in=(
                                                 'Critical', 'High', 'Medium', 'Low'),
                                             prod_type=pt).annotate(
                critical=Sum(
                    Case(When(engagement__test__finding__severity='Critical',
                              then=Value(1)),
                         output_field=IntegerField())
                ),
                high=Sum(
                    Case(When(engagement__test__finding__severity='High',
                              then=Value(1)),
                         output_field=IntegerField())
                ),
                medium=Sum(
                    Case(When(engagement__test__finding__severity='Medium',
                              then=Value(1)),
                         output_field=IntegerField())
                ),
                low=Sum(
                    Case(When(engagement__test__finding__severity='Low',
                              then=Value(1)),
                         output_field=IntegerField())
                ),
                total=Sum(
                    Case(When(engagement__test__finding__severity__in=(
                        'Critical', 'High', 'Medium', 'Low'),
                        then=Value(1)),
                        output_field=IntegerField()))
            ).order_by('-critical', '-high', '-medium', '-low')[:10]
            # Fold the grouped severity rows into S0..S3 buckets with totals.
            cip = {'S0': 0, 'S1': 0, 'S2': 0, 'S3': 0,
                   'Total': total_closed_in_period}
            aip = {'S0': 0, 'S1': 0, 'S2': 0, 'S3': 0,
                   'Total': total_overall_in_pt}
            for o in closed_in_period:
                cip[o['numerical_severity']] = o['numerical_severity__count']
            for o in overall_in_pt:
                aip[o['numerical_severity']] = o['numerical_severity__count']
        else:
            messages.add_message(request, messages.ERROR,
                                 "Please choose month and year and the Product Type.",
                                 extra_tags='alert-danger')
    add_breadcrumb(title="Bi-Weekly Metrics", top_level=True, request=request)
    return render(request,
                  'dojo/pt_counts.html',
                  {'form': form,
                   'start_date': start_date,
                   'end_date': end_date,
                   'opened_in_period': oip,
                   'trending_opened': opened_in_period_list,
                   'closed_in_period': cip,
                   'overall_in_pt': aip,
                   'all_current_in_pt': all_current_in_pt,
                   'top_ten': top_ten,
                   'pt': pt}
                  )
def case_builder(field_name):
    """Build a Case expression that resolves ``field_name`` through whichever
    related object is set -- ``group`` first, then ``pseudogroup`` -- and
    yields None (as an integer column) when neither relation exists."""
    via_group = When(group__isnull=False, then='group__' + field_name)
    via_pseudogroup = When(pseudogroup__isnull=False, then='pseudogroup__' + field_name)
    return Case(via_group, via_pseudogroup, default=None, output_field=IntegerField())
def sum_when(**kwargs):
    """Conditionally sum the ``value`` column: rows matching the keyword
    conditions contribute F('value'), all others contribute 0, and an
    empty aggregate coalesces to 0 instead of NULL."""
    matched = When(then=F('value'), **kwargs)
    conditional_value = Case(matched, default=0)
    return Coalesce(Sum(conditional_value), 0)
def _toggle_tasks_activity(self, queryset):
    """Invert the ``enabled`` flag of every row in *queryset* with a single
    UPDATE statement, passing through whatever ``queryset.update`` returns."""
    flipped = Case(
        When(enabled=True, then=Value(False)),
        default=Value(True),
    )
    return queryset.update(enabled=flipped)
def get_queryset(self):
    """Build the queryset of obligated amounts grouped by recipient for a
    toptier awarding agency and fiscal year.

    Query params: ``fiscal_year`` and ``awarding_agency_id`` (required,
    and the id must be a toptier agency), ``award_category`` (optional;
    "other" also folds in "insurance").

    :raises InvalidParameterException: on missing/invalid parameters
    """
    # retrieve post request payload
    json_request = self.request.query_params

    # Retrieve fiscal_year & awarding_agency_id from Request
    fiscal_year = json_request.get('fiscal_year')
    awarding_agency_id = json_request.get('awarding_agency_id')

    # Optional Award Category
    award_category = json_request.get('award_category')

    # Required Query Parameters were not Provided
    if not (fiscal_year and awarding_agency_id):
        raise InvalidParameterException(
            'Missing one or more required query parameters: fiscal_year, awarding_agency_id'
        )
    if not check_valid_toptier_agency(awarding_agency_id):
        raise InvalidParameterException(
            'Awarding Agency ID provided must correspond to a toptier agency'
        )

    # NOTE(review): .first() can return None if the Agency row vanished
    # between the check above and here -- would raise AttributeError.
    toptier_agency = Agency.objects.filter(
        id=awarding_agency_id).first().toptier_agency
    queryset = TransactionNormalized.objects.filter(
        federal_action_obligation__isnull=False)

    # DS-1655: if the AID is "097" (DOD), Include the branches of the military in the queryset
    if toptier_agency.cgac_code == DOD_CGAC:
        tta_list = DOD_ARMED_FORCES_CGAC
        queryset = queryset.filter(
            # Filter based on fiscal_year and awarding_category_id
            fiscal_year=fiscal_year,
            awarding_agency__toptier_agency__cgac_code__in=tta_list)
    else:
        queryset = queryset.filter(
            # Filter based on fiscal_year and awarding_category_id
            fiscal_year=fiscal_year,
            awarding_agency__toptier_agency__cgac_code=toptier_agency.cgac_code)

    queryset = queryset.annotate(
        award_category=F('award__category'),
        recipient_id=F('recipient__legal_entity_id'),
        recipient_name=F('recipient__recipient_name'))

    if award_category is not None:
        # Filter based on award_category
        if award_category != "other":
            queryset = queryset.filter(award_category=award_category)
        else:
            # "other" covers both 'other' and 'insurance'; relabel
            # 'insurance' rows as 'other' so they group together below.
            queryset = queryset.filter(Q(award_category='insurance') | Q(award_category='other')).\
                annotate(award_category=Case(When(award_category='insurance', then=Value('other')),
                                             output_field=CharField())
                         )

    # Sum Obligations for each Recipient
    queryset = queryset.values(
        'award_category', 'recipient_id', 'recipient_name').annotate(
        obligated_amount=Sum(
            'federal_action_obligation')).order_by('-obligated_amount')

    return queryset
def opened_in_period(start_date, end_date, pt):
    """Summarize findings opened in [start_date, end_date] for product type ``pt``.

    Returns a dict with per-severity-rank counts (S0..S3), a 'Total' of
    opened findings, the period bounds, the number of findings 'closed'
    (mitigated) in the period, and the running 'to_date_total' of findings
    still open as of end_date.
    """
    # Still-open, verified findings opened in the period, grouped by
    # numerical severity rank.
    opened_in_period = Finding.objects.filter(
        date__range=[start_date, end_date],
        test__engagement__product__prod_type=pt,
        verified=True,
        false_p=False,
        duplicate=False,
        out_of_scope=False,
        mitigated__isnull=True,
        severity__in=('Critical', 'High', 'Medium', 'Low')).values(
        'numerical_severity').annotate(
        Count('numerical_severity')).order_by('numerical_severity')
    total_opened_in_period = Finding.objects.filter(
        date__range=[start_date, end_date],
        test__engagement__product__prod_type=pt,
        verified=True,
        false_p=False,
        duplicate=False,
        out_of_scope=False,
        mitigated__isnull=True,
        severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
        total=Sum(
            Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                      then=Value(1)),
                 output_field=IntegerField())))['total']
    oip = {
        'S0': 0,
        'S1': 0,
        'S2': 0,
        'S3': 0,
        'Total': total_opened_in_period,
        'start_date': start_date,
        'end_date': end_date,
        # Findings mitigated during the period (regardless of status flags).
        'closed': Finding.objects.filter(
            mitigated__range=[start_date, end_date],
            test__engagement__product__prod_type=pt,
            severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
            total=Sum(
                Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                          then=Value(1)),
                     output_field=IntegerField())))['total'],
        # All findings still open as of end_date (end_date is assumed to be
        # a datetime here, hence .date() -- TODO confirm at callers).
        'to_date_total': Finding.objects.filter(
            date__lte=end_date.date(),
            verified=True,
            false_p=False,
            duplicate=False,
            out_of_scope=False,
            mitigated__isnull=True,
            test__engagement__product__prod_type=pt,
            severity__in=('Critical', 'High', 'Medium', 'Low')).count()
    }
    # Overlay the actual per-rank counts onto the zeroed S0..S3 buckets.
    for o in opened_in_period:
        oip[o['numerical_severity']] = o['numerical_severity__count']
    return oip
def get_queryset(self):
    """Return Borrower rows annotated for listing, narrowed by query params.

    Supported query params: borrowerId, branch, totalAvailmentsFrom/To,
    totalOutstandingBalanceFrom/To, totalPaymentsFrom/To,
    clientSinceFrom/To.  Range filters on computed totals are evaluated in
    Python (one accessor call per borrower — N+1 queries; presumably
    acceptable for the data volume here — TODO confirm).
    """
    queryset = (
        Borrower.objects.prefetch_related(
            Prefetch(
                "loans",
                queryset=Loan.objects.order_by("dateReleased"),
            ),
        ).annotate(
            # Display name: trade name for businesses, "first middle last"
            # for individuals.
            borrowerName=Case(
                When(Q(recordType="BD"), then=F("business__tradeName")),
                When(
                    Q(recordType="ID"),
                    then=Concat(
                        F("individual__firstname"),
                        V(" "),
                        F("individual__middlename"),
                        V(" "),
                        F("individual__lastname"),
                    ),
                ),
            ),
            # borrowerType=Case(
            #     When(recordType="BD", then=V("Business")),
            #     When(recordType="ID", then=V("Individual")),
            #     output_field=models.CharField(),
            # ),
            # borrowerAddress=Case(
            #     When(
            #         Q(recordType="BD"),
            #         then=Concat(
            #             F("business__businessAddress__streetNo"),
            #             V(" "),
            #             F("business__businessAddress__barangay"),
            #             V(" "),
            #             F("business__businessAddress__city"),
            #             V(" "),
            #             F("business__businessAddress__province"),
            #         ),
            #     ),
            #     When(
            #         Q(recordType="ID"),
            #         then=Concat(
            #             F("individual__individualAddress__streetNo"),
            #             V(" "),
            #             F("individual__individualAddress__barangay"),
            #             V(" "),
            #             F("individual__individualAddress__city"),
            #             V(" "),
            #             F("individual__individualAddress__province"),
            #         ),
            #     ),
            # ),
            # Only business records carry a contact person / area code here;
            # for individuals these annotations are NULL.
            contactPersonNumber=Case(
                When(
                    Q(recordType="BD"),
                    then=F("business__businessContactPerson__phoneNo"),
                ),
            ),
            areaCode=Case(
                When(
                    Q(recordType="BD"),
                    then=F("area__branchCode"),
                ),
            ),
            # tin=Case(
            #     When(
            #         Q(recordType="BD") & Q(business__businessIdentification__identificationType__value="10"),
            #         then=F("business__businessIdentification__identificationNumber"),
            #     ),
            #     When(
            #         Q(recordType="ID") & Q(individual__individualIdentification__identificationType__value="10"),
            #         then=F("individual__individualIdentification__identificationNumber"),
            #     ),
            # ),
        ).exclude(isDeleted=True).order_by("-borrowerId"))

    borrowerId = self.request.query_params.get("borrowerId", None)
    branch = self.request.query_params.get("branch", None)
    # NOTE(review): loanProgramId is read but never used in this method.
    loanProgramId = self.request.query_params.get("loanProgramId", None)
    totalAvailmentsFrom = self.request.query_params.get(
        "totalAvailmentsFrom", None)
    totalAvailmentsTo = self.request.query_params.get(
        "totalAvailmentsTo", None)
    totalOutstandingBalanceFrom = self.request.query_params.get(
        "totalOutstandingBalanceFrom", None)
    totalOutstandingBalanceTo = self.request.query_params.get(
        "totalOutstandingBalanceTo", None)
    totalPaymentsFrom = self.request.query_params.get(
        "totalPaymentsFrom", None)
    totalPaymentsTo = self.request.query_params.get(
        "totalPaymentsTo", None)
    clientSinceFrom = self.request.query_params.get(
        "clientSinceFrom", None)
    clientSinceTo = self.request.query_params.get("clientSinceTo", None)

    if borrowerId is not None:
        queryset = queryset.filter(borrowerId=borrowerId)
    if branch is not None:
        queryset = queryset.filter(area=branch)

    # Range filter on total availments, computed per borrower in Python.
    if totalAvailmentsFrom is not None and totalAvailmentsTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.totalAvailments = borrower.getTotalAvailments()
            if (int(borrower.totalAvailments) >= int(totalAvailmentsFrom)
                ) and (int(borrower.totalAvailments) <= int(totalAvailmentsTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    # Range filter on outstanding balance, same Python-side approach.
    if totalOutstandingBalanceFrom is not None and totalOutstandingBalanceTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance(
            )
            if (int(borrower.totalOutstandingBalance) >= int(totalOutstandingBalanceFrom)) and (
                    int(borrower.totalOutstandingBalance) <= int(totalOutstandingBalanceTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    # Range filter on total payments, same Python-side approach.
    if totalPaymentsFrom is not None and totalPaymentsTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.payments = borrower.getPayments()
            borrower.totalPayments = borrower.getTotalPayments()
            if (int(borrower.totalPayments) >= int(totalPaymentsFrom)) and (int(
                    borrower.totalPayments) <= int(totalPaymentsTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    if clientSinceFrom is not None and clientSinceTo is not None:
        queryset = queryset.filter(
            accreditationDate__gte=clientSinceFrom).filter(
            accreditationDate__lte=clientSinceTo)

    # Attach computed totals to the instances of this evaluation.
    # NOTE(review): if the queryset is re-evaluated later (e.g. by a further
    # filter), these instance attributes are lost — confirm downstream usage.
    for borrower in queryset:
        borrower.totalAvailments = borrower.getTotalAvailments()
        borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance(
        )
        borrower.payments = borrower.getPayments()
        borrower.totalPayments = borrower.getTotalPayments()
    return queryset
def metrics(request, mtype):
    """Render the product-type metrics page (or the dashboard variant).

    :param request: HTTP request; GET params drive the finding filter and
        the optional ``view=dashboard`` mode.
    :param mtype: a Product_Type id, or the string ``'All'``.
    """
    template = 'dojo/metrics.html'
    page_name = 'Product Type Metrics'
    show_pt_filter = True

    # Raw SQL computing a finding's age in days; dialect-specific
    # (PostgreSQL DATE_PART vs. the MySQL-style IF/DATEDIFF fallback).
    sql_age_query = ""
    if "postgresql" in settings.DATABASES["default"]["ENGINE"]:
        sql_age_query = """SELECT (CASE WHEN (dojo_finding.mitigated IS NULL) THEN DATE_PART(\'day\', date::timestamp - dojo_finding.date::timestamp) ELSE DATE_PART(\'day\', dojo_finding.mitigated::timestamp - dojo_finding.date::timestamp) END)"""
    else:
        sql_age_query = """SELECT IF(dojo_finding.mitigated IS NULL, DATEDIFF(CURDATE(), dojo_finding.date), DATEDIFF(dojo_finding.mitigated, dojo_finding.date))"""

    # All verified findings, with a per-row risk-acceptance count and the
    # SQL-computed age attached via .extra().
    findings = Finding.objects.filter(
        verified=True,
        severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')).prefetch_related(
        'test__engagement__product',
        'test__engagement__product__prod_type',
        'test__engagement__risk_acceptance',
        'risk_acceptance_set',
        'reporter').extra(
        select={
            'ra_count': 'SELECT COUNT(*) FROM dojo_risk_acceptance INNER JOIN '
                        'dojo_risk_acceptance_accepted_findings ON '
                        '( dojo_risk_acceptance.id = dojo_risk_acceptance_accepted_findings.risk_acceptance_id ) '
                        'WHERE dojo_risk_acceptance_accepted_findings.finding_id = dojo_finding.id',
            "sql_age": sql_age_query
        },
    )
    # Same queryset restricted to active findings.
    active_findings = Finding.objects.filter(
        verified=True,
        active=True,
        severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')).prefetch_related(
        'test__engagement__product',
        'test__engagement__product__prod_type',
        'test__engagement__risk_acceptance',
        'risk_acceptance_set',
        'reporter').extra(
        select={
            'ra_count': 'SELECT COUNT(*) FROM dojo_risk_acceptance INNER JOIN '
                        'dojo_risk_acceptance_accepted_findings ON '
                        '( dojo_risk_acceptance.id = dojo_risk_acceptance_accepted_findings.risk_acceptance_id ) '
                        'WHERE dojo_risk_acceptance_accepted_findings.finding_id = dojo_finding.id',
            "sql_age": sql_age_query
        },
    )

    if mtype != 'All':
        # A specific product type was requested: inject it into request.GET
        # so the MetricsFindingFilter below picks it up.
        pt = Product_Type.objects.filter(id=mtype)
        request.GET._mutable = True
        request.GET.appendlist('test__engagement__product__prod_type', mtype)
        request.GET._mutable = False
        mtype = pt[0].name
        show_pt_filter = False
        page_name = '%s Metrics' % mtype
        prod_type = pt
    elif 'test__engagement__product__prod_type' in request.GET:
        prod_type = Product_Type.objects.filter(id__in=request.GET.getlist('test__engagement__product__prod_type', []))
    else:
        prod_type = Product_Type.objects.all()

    findings = MetricsFindingFilter(request.GET, queryset=findings)
    active_findings = MetricsFindingFilter(request.GET, queryset=active_findings)

    findings.qs  # this is needed to load details from filter since it is lazy
    active_findings.qs  # this is needed to load details from filter since it is lazy

    # Normalize the filter's date boundaries to timezone-aware midnight.
    start_date = findings.filters['date'].start_date
    start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=timezone.get_current_timezone())
    end_date = findings.filters['date'].end_date
    end_date = datetime(end_date.year, end_date.month, end_date.day, tzinfo=timezone.get_current_timezone())

    if len(prod_type) > 0:
        findings_closed = Finding.objects.filter(mitigated__range=[start_date, end_date],
                                                 test__engagement__product__prod_type__in=prod_type).prefetch_related(
            'test__engagement__product')
        # capture the accepted findings in period
        accepted_findings = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date],
                                                   test__engagement__product__prod_type__in=prod_type). \
            prefetch_related('test__engagement__product')
        accepted_findings_counts = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date],
                                                          test__engagement__product__prod_type__in=prod_type). \
            prefetch_related('test__engagement__product').aggregate(
            total=Sum(
                Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                          then=Value(1)),
                     output_field=IntegerField())),
            critical=Sum(
                Case(When(severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            info=Sum(
                Case(When(severity='Info',
                          then=Value(1)),
                     output_field=IntegerField())),
        )
    else:
        findings_closed = Finding.objects.filter(mitigated__range=[start_date, end_date]).prefetch_related(
            'test__engagement__product')
        accepted_findings = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date]). \
            prefetch_related('test__engagement__product')
        accepted_findings_counts = Finding.objects.filter(risk_acceptance__created__range=[start_date, end_date]). \
            prefetch_related('test__engagement__product').aggregate(
            total=Sum(
                Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'),
                          then=Value(1)),
                     output_field=IntegerField())),
            critical=Sum(
                Case(When(severity='Critical',
                          then=Value(1)),
                     output_field=IntegerField())),
            high=Sum(
                Case(When(severity='High',
                          then=Value(1)),
                     output_field=IntegerField())),
            medium=Sum(
                Case(When(severity='Medium',
                          then=Value(1)),
                     output_field=IntegerField())),
            low=Sum(
                Case(When(severity='Low',
                          then=Value(1)),
                     output_field=IntegerField())),
            info=Sum(
                Case(When(severity='Info',
                          then=Value(1)),
                     output_field=IntegerField())),
        )

    r = relativedelta(end_date, start_date)
    months_between = (r.years * 12) + r.months
    # include current month
    months_between += 1
    # 4.33 ~= average weeks per month; presumably a deliberate approximation.
    weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7)))
    if weeks_between <= 0:
        weeks_between += 2

    monthly_counts = get_period_counts(active_findings.qs, findings.qs, findings_closed, accepted_findings,
                                       months_between, start_date,
                                       relative_delta='months')
    weekly_counts = get_period_counts(active_findings.qs, findings.qs, findings_closed, accepted_findings,
                                      weeks_between, start_date,
                                      relative_delta='weeks')

    # Top ten products ranked by open finding counts, most severe first.
    top_ten = Product.objects.filter(engagement__test__finding__verified=True,
                                     engagement__test__finding__false_p=False,
                                     engagement__test__finding__duplicate=False,
                                     engagement__test__finding__out_of_scope=False,
                                     engagement__test__finding__mitigated__isnull=True,
                                     engagement__test__finding__severity__in=(
                                         'Critical', 'High', 'Medium', 'Low'),
                                     prod_type__in=prod_type).annotate(
        critical=Sum(
            Case(When(engagement__test__finding__severity='Critical', then=Value(1)),
                 output_field=IntegerField())
        ),
        high=Sum(
            Case(When(engagement__test__finding__severity='High', then=Value(1)),
                 output_field=IntegerField())
        ),
        medium=Sum(
            Case(When(engagement__test__finding__severity='Medium', then=Value(1)),
                 output_field=IntegerField())
        ),
        low=Sum(
            Case(When(engagement__test__finding__severity='Low', then=Value(1)),
                 output_field=IntegerField())
        ),
        total=Sum(
            Case(When(engagement__test__finding__severity__in=(
                'Critical', 'High', 'Medium', 'Low'), then=Value(1)),
                output_field=IntegerField()))
    ).order_by('-critical', '-high', '-medium', '-low')[:10]

    # Histogram buckets for finding age: 0-30, 31-60, 61-90, 90+ days.
    age_detail = [0, 0, 0, 0]

    in_period_counts = {"Critical": 0, "High": 0, "Medium": 0, "Low": 0, "Info": 0, "Total": 0}
    in_period_details = {}
    closed_in_period_counts = {"Critical": 0, "High": 0, "Medium": 0, "Low": 0, "Info": 0, "Total": 0}
    closed_in_period_details = {}
    accepted_in_period_details = {}

    for finding in findings.qs:
        if 0 <= finding.sql_age <= 30:
            age_detail[0] += 1
        elif 30 < finding.sql_age <= 60:
            age_detail[1] += 1
        elif 60 < finding.sql_age <= 90:
            age_detail[2] += 1
        elif finding.sql_age > 90:
            age_detail[3] += 1

        in_period_counts[finding.severity] += 1
        in_period_counts['Total'] += 1

        if finding.test.engagement.product.name not in in_period_details:
            in_period_details[finding.test.engagement.product.name] = {
                'path': reverse('view_product_findings', args=(finding.test.engagement.product.id,)),
                'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0}
        in_period_details[
            finding.test.engagement.product.name
        ][finding.severity] += 1
        in_period_details[finding.test.engagement.product.name]['Total'] += 1

    for finding in accepted_findings:
        if finding.test.engagement.product.name not in accepted_in_period_details:
            accepted_in_period_details[finding.test.engagement.product.name] = {
                'path': reverse('accepted_findings') + '?test__engagement__product=' + str(
                    finding.test.engagement.product.id),
                'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0}
        accepted_in_period_details[
            finding.test.engagement.product.name
        ][finding.severity] += 1
        accepted_in_period_details[finding.test.engagement.product.name]['Total'] += 1

    for f in findings_closed:
        closed_in_period_counts[f.severity] += 1
        closed_in_period_counts['Total'] += 1

        if f.test.engagement.product.name not in closed_in_period_details:
            closed_in_period_details[f.test.engagement.product.name] = {
                'path': reverse('closed_findings') + '?test__engagement__product=' + str(
                    f.test.engagement.product.id),
                'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0}
        closed_in_period_details[
            f.test.engagement.product.name
        ][f.severity] += 1
        closed_in_period_details[f.test.engagement.product.name]['Total'] += 1

    punchcard = list()
    ticks = list()
    highest_count = 0

    # Dashboard mode swaps the template and adds punchcard data.
    if 'view' in request.GET and 'dashboard' == request.GET['view']:
        punchcard, ticks, highest_count = get_punchcard_data(findings.qs, weeks_between, start_date)
        page_name = (get_system_setting('team_name')) + " Metrics"
        template = 'dojo/dashboard-metrics.html'

    add_breadcrumb(title=page_name, top_level=not len(request.GET), request=request)

    return render(request, template, {
        'name': page_name,
        'start_date': start_date,
        'end_date': end_date,
        'findings': findings,
        'opened_per_month': monthly_counts['opened_per_period'],
        'active_per_month': monthly_counts['active_per_period'],
        'opened_per_week': weekly_counts['opened_per_period'],
        'accepted_per_month': monthly_counts['accepted_per_period'],
        'accepted_per_week': weekly_counts['accepted_per_period'],
        'top_ten_products': top_ten,
        'age_detail': age_detail,
        'in_period_counts': in_period_counts,
        'in_period_details': in_period_details,
        'accepted_in_period_counts': accepted_findings_counts,
        'accepted_in_period_details': accepted_in_period_details,
        'closed_in_period_counts': closed_in_period_counts,
        'closed_in_period_details': closed_in_period_details,
        'punchcard': punchcard,
        'ticks': ticks,
        'highest_count': highest_count,
        'show_pt_filter': show_pt_filter,
    })
def get_queryset(self):
    """Return Borrower rows with related documents/loans and computed totals.

    Query params: ``loanProgramId`` adds a per-program availment total;
    ``outstandingBalance`` (truthy) drops borrowers whose outstanding
    balance is zero.  Totals are computed per borrower in Python (N+1
    accessor calls — presumably acceptable for the data volume; confirm).
    """
    queryset = (Borrower.objects.prefetch_related(
        "borrowerAttachments",
        Prefetch("documents", queryset=Document.objects.order_by("dateCreated")),
        Prefetch("loans", queryset=Loan.objects.order_by("dateReleased")),
        Prefetch(
            "documents__documentMovements",
            queryset=DocumentMovement.objects.order_by("-dateCreated")),
    ).annotate(
        # Display name: trade name for businesses, "first middle last" for
        # individuals.
        borrowerName=Case(
            When(Q(recordType="BD"), then=F("business__tradeName")),
            When(
                Q(recordType="ID"),
                then=Concat(
                    F("individual__firstname"),
                    V(" "),
                    F("individual__middlename"),
                    V(" "),
                    F("individual__lastname"),
                ),
            ),
        ),
        _area=Case(When(Q(recordType="BD"), then=F("area__branchCode")), ),
    ).exclude(isDeleted=True).order_by("borrowerId"))

    loanProgramId = self.request.query_params.get("loanProgramId", None)
    outstandingBalance = self.request.query_params.get(
        "outstandingBalance", None)

    # Collected during the first pass; applied as a single exclude afterwards
    # (the original chained one-element excludes per iteration — same result).
    zero_balance_pks = []

    for borrower in queryset:
        borrower.totalAvailments = borrower.getTotalAvailments()
        borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance(
        )
        borrower.payments = borrower.getPayments()
        borrower.totalPayments = borrower.getTotalPayments()
        for loan in borrower.loans.all():
            # FIX: the original assigned the bound method (loan.getTotalPayment)
            # instead of calling it, unlike every sibling accessor below.
            loan.totalPayment = loan.getTotalPayment()
            loan.totalPrincipalPayment = loan.getTotalPrincipalPayment()
            # NOTE(review): "getTotaInterestPayment" looks like a typo in the
            # Loan model's method name; kept because the callee is defined
            # elsewhere.
            loan.totalInterestPayment = loan.getTotaInterestPayment()
            loan.totalAccruedInterestPayment = loan.getTotalAccruedInterestPayment(
            )
            loan.totalTotalInterestPayment = loan.getTotalTotalInterestPayment(
            )
            loan.totalPenaltyPayment = loan.getTotalPenaltyPayment()
            loan.totalAdditionalInterestPayment = loan.getTotalAdditionalInterestPayment(
            )
        if loanProgramId is not None:
            borrower.totalAvailmentPerProgram = borrower.getTotalAvailmentsPerProgram(
                loanProgramId)
        if outstandingBalance:
            borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance(
            )
            if borrower.totalOutstandingBalance == 0:
                zero_balance_pks.append(borrower.pk)

    if zero_balance_pks:
        queryset = queryset.exclude(pk__in=zero_balance_pks)

    # Second pass: format totals as strings for the frontend.
    # NOTE(review): the appended " | number :'2'" looks like an Angular filter
    # expression leaking into the payload — confirm the consumer expects it.
    for borrower in queryset:
        borrower.totalAvailments = str(
            borrower.getTotalAvailments()) + " | number :'2'"
        borrower.totalOutstandingBalance = str(
            borrower.getTotalOutstandingBalance()) + " | number :'2'"
        borrower.payments = borrower.getPayments()
        borrower.totalPayments = str(
            borrower.getTotalPayments()) + " | number :'2'"
    return queryset
def __init__(self, field, condition=None, **lookups):
    """Conditional sum: aggregate *field* only over rows matching *condition*.

    If no explicit Q object is given, any keyword lookups are combined into
    one (``SumIf('amount', status='paid')``).  Non-matching rows contribute 0.
    """
    if condition is None and lookups:
        condition = Q(**lookups)
    conditional = Case(When(condition, then=field), default=0)
    super(SumIf, self).__init__(conditional)
def cond_int(query):
    """Return a Case expression yielding 1 where *query* matches, else 0."""
    matched = When(query, then=Value(1))
    return Case(matched, default=Value(0), output_field=IntegerField())
def get_all_tx_hashes(self, safe_address: str, queued: bool = True, trusted: bool = True) -> QuerySet:
    """
    Build a queryset with hashes for every tx for a Safe for pagination filtering. In the case of
    Multisig Transactions, as some of them are not mined, we use the SafeTxHash
    Criteria for building this list:
      - Return only multisig txs with `nonce < current Safe Nonce`
      - The endpoint should only show incoming transactions that have been mined
      - The transactions should be sorted by execution date. If an outgoing transaction doesn't have an execution
      date the execution date of the transaction with the same nonce that has been executed should be taken.
      - Incoming and outgoing transfers or Eth/tokens must be under a multisig/module tx if triggered by one.
      Otherwise they should have their own entry in the list using a EthereumTx
    :param safe_address:
    :param queued: By default `True`, all transactions are returned. With `False`, just txs wih
    `nonce < current Safe Nonce` are returned.
    :param trusted: By default `True`, just txs that are trusted are returned (with at least one confirmation,
    sent by a delegate or indexed). With `False` all txs are returned
    :return: List with tx hashes sorted by date (newest first)
    """
    # If tx is not mined, get the execution date of a tx mined with the same nonce
    case = Case(
        When(ethereum_tx__block=None,
             then=MultisigTransaction.objects.filter(
                 safe=OuterRef('safe'),
                 nonce=OuterRef('nonce')
             ).exclude(
                 ethereum_tx__block=None
             ).values('ethereum_tx__block__timestamp')),
        default=F('ethereum_tx__block__timestamp'))
    # Tricky, we will merge SafeTx hashes with EthereumTx hashes.
    # Block is needed to get stable ordering.
    multisig_safe_tx_ids = MultisigTransaction.objects.filter(
        safe=safe_address
    ).annotate(
        execution_date=case,
        block=F('ethereum_tx__block_id'),
    ).values('safe_tx_hash', 'execution_date', 'block', 'created')

    if not queued:  # Filter out txs with nonce >= Safe nonce
        last_nonce_query = MultisigTransaction.objects.filter(
            safe=safe_address
        ).exclude(
            ethereum_tx=None
        ).order_by('-nonce').values('nonce')
        multisig_safe_tx_ids = multisig_safe_tx_ids.filter(
            nonce__lte=Subquery(last_nonce_query[:1]))

    if trusted:  # Just show trusted transactions
        multisig_safe_tx_ids = multisig_safe_tx_ids.filter(trusted=True)

    # Get module txs
    module_tx_ids = ModuleTransaction.objects.filter(
        safe=safe_address
    ).annotate(
        execution_date=F('internal_tx__ethereum_tx__block__timestamp'),
        block=F('internal_tx__ethereum_tx__block_id'),
    ).distinct().values('internal_tx__ethereum_tx_id', 'execution_date', 'block', 'created')

    multisig_hashes = MultisigTransaction.objects.filter(
        safe=safe_address
    ).exclude(
        ethereum_tx=None
    ).values('ethereum_tx_id')
    module_hashes = ModuleTransaction.objects.filter(
        safe=safe_address
    ).values('internal_tx__ethereum_tx_id')
    # FIX: the original unioned the multisig hashes with themselves
    # (`mulsitig_hashes.union(mulsitig_hashes)`), so module-tx hashes were
    # never excluded below and transfers triggered by module txs showed up
    # twice in the final list.
    multisig_and_module_hashes = multisig_hashes.union(module_hashes)

    # Get incoming tokens not included on Multisig or Module txs
    event_tx_ids = EthereumEvent.objects.erc20_and_721_events().filter(
        arguments__to=safe_address
    ).exclude(
        ethereum_tx__in=multisig_and_module_hashes
    ).annotate(
        execution_date=F('ethereum_tx__block__timestamp'),
        created=F('ethereum_tx__block__timestamp'),
        block=F('ethereum_tx__block_id'),
    ).distinct().values('ethereum_tx_id', 'execution_date', 'block', 'created')

    # Get incoming txs not included on Multisig or Module txs
    internal_tx_ids = InternalTx.objects.filter(
        call_type=EthereumTxCallType.CALL.value,
        value__gt=0,
        to=safe_address,
    ).exclude(
        ethereum_tx__in=multisig_and_module_hashes
    ).annotate(
        execution_date=F('ethereum_tx__block__timestamp'),
        created=F('ethereum_tx__block__timestamp'),
        block=F('ethereum_tx__block_id'),
    ).distinct().values('ethereum_tx_id', 'execution_date', 'block', 'created')

    # Tricky, we merge SafeTx hashes with EthereumTx hashes.
    # FIX: dropped the original's redundant second `.union(internal_tx_ids)`
    # (union deduplicates, so it was a no-op — just wasted SQL).
    queryset = multisig_safe_tx_ids.distinct().union(
        event_tx_ids
    ).union(
        internal_tx_ids
    ).union(
        module_tx_ids
    ).order_by('-execution_date', 'block', '-created')
    # Order by block because `block_number < NULL`, so txs mined will have preference,
    # and `created` to get always the same ordering with not executed transactions, as they will share
    # the same `execution_date` that the mined tx
    return queryset
def locationQuery(**kwargs) -> object:
    """Serve a DataTables-style server-side query over Location rows.

    Expects DataTables request params in *kwargs*, each value a one-element
    list (e.g. from ``request.GET``): draw, start, length, search[value],
    order[0][column], order[0][dir], type.

    :return: dict with 'items' (page of rows), 'count' (filtered count),
             'total' (unfiltered count) and the echoed 'draw'.
    """
    ORDER_COLUMN_CHOICES = Choices(
        ('0', 'id'),
        ('1', 'codeName'),
        ('2', 'location_owner'),
        ('3', 'location_phone'),
        ('4', 'location_companyNumber'),
        ('5', 'location_address'),
        ('6', 'location_address_category'),
        ('7', 'character_string'),
        # NOTE(review): '7' and '8' both map to character_string — confirm
        # this is intentional.
        ('8', 'character_string'),
        ('9', 'location_manager_string'),
    )
    # NOTE(review): a missing param makes `None[0]` raise TypeError, so the
    # `None` defaults are effectively unused; kept as-is to preserve behavior.
    draw = int(kwargs.get('draw', None)[0])
    start = int(kwargs.get('start', None)[0])
    length = int(kwargs.get('length', None)[0])
    search_value = kwargs.get('search[value]', None)[0]
    order_column = kwargs.get('order[0][column]', None)[0]
    order = kwargs.get('order[0][dir]', None)[0]
    order_column = ORDER_COLUMN_CHOICES[order_column]
    # Renamed from `type` so the builtin is not shadowed.
    location_type = kwargs.get('type', None)[0]

    queryset = Location.objects.all().filter(delete_state='N').filter(type=location_type)\
        .annotate(type_string=Case(
            When(type='01', then=Value('포장재입고')),
            When(type='03', then=Value('원란입고')),
            When(type='07', then=Value('원란판매')),
            When(type='09', then=Value('OEM입고거래처')),
            default=Value('판매'),
            output_field=CharField()))\
        .annotate(character_string=Case(
            When(location_character='01', then=Value('B2B')),
            When(location_character='02', then=Value('급식')),
            When(location_character='03', then=Value('미군납')),
            When(location_character='04', then=Value('백화점')),
            When(location_character='05', then=Value('온라인')),
            When(location_character='06', then=Value('자사몰')),
            When(location_character='07', then=Value('직거래')),
            When(location_character='08', then=Value('특판')),
            When(location_character='09', then=Value('하이퍼')),
            default=Value('기타'),
            output_field=CharField())) \
        .annotate(location_manager_string=F('location_manager__first_name'))

    if order == 'desc':
        order_column = '-' + order_column

    total = queryset.count()
    if search_value:
        queryset = queryset.filter(codeName__icontains=search_value)
    count = queryset.count()
    queryset = queryset.order_by(order_column)[start:start + length]
    return {
        'items': queryset,
        'count': count,
        'total': total,
        'draw': draw
    }
def test_case_aggregate(self):
    """Filtered conditional aggregate: sum friends' ages (only where age=40),
    restricted to friends whose name starts with 'test'."""
    filtered_sum = Sum(
        Case(When(friends__age=40, then=F('friends__age'))),
        filter=Q(friends__name__startswith='test'),
    )
    result = Author.objects.aggregate(age=filtered_sum)
    self.assertEqual(result['age'], 80)
def sort_by_attribute(self,
                      attribute_pk: Union[int, str],
                      ascending: bool = True):
    """Sort a query set by the values of the given product attribute.

    Products without the attribute sort last; products whose product type
    has the attribute but that carry no value sort just above them.

    :param attribute_pk: The database ID (must be a number) of the attribute
                         to sort by.
    :param ascending: The sorting direction.
    """
    qs: models.QuerySet = self
    # Retrieve all the products' attribute data IDs (assignments) and
    # product types that have the given attribute associated to them
    associated_values = tuple(
        AttributeProduct.objects.filter(
            attribute_id=attribute_pk).values_list("pk", "product_type_id"))

    # No product type uses this attribute: nothing to sort on.
    if not associated_values:
        if not ascending:
            return qs.reverse()
        return qs

    attribute_associations, product_types_associated_to_attribute = zip(
        *associated_values)

    qs = qs.annotate(
        # Contains to retrieve the attribute data (singular) of each product
        # Refer to `AttributeProduct`.
        filtered_attribute=FilteredRelation(
            relation_name="attributes",
            condition=Q(
                attributes__assignment_id__in=attribute_associations),
        ),

        # Implicit `GROUP BY` required for the `StringAgg` aggregation
        grouped_ids=Count("id"),

        # String aggregation of the attribute's values to efficiently sort them
        concatenated_values=Case(
            # If the product has no association data but has the given attribute
            # associated to its product type, then consider the concatenated values
            # as empty (non-null).
            When(
                Q(product_type_id__in=product_types_associated_to_attribute
                  ) & Q(filtered_attribute=None),
                then=models.Value(""),
            ),
            default=StringAgg(
                F("filtered_attribute__values__name"),
                delimiter=",",
                ordering=([
                    f"filtered_attribute__values__{field_name}"
                    for field_name in AttributeValue._meta.ordering or []
                ]),
            ),
            output_field=models.CharField(),
        ),
        concatenated_values_order=Case(
            # Make the products having no such attribute be last in the sorting
            When(concatenated_values=None, then=2),
            # Put the products having an empty attribute value at the bottom of
            # the other products.
            When(concatenated_values="", then=1),
            # Put the products having an attribute value to be always at the top
            default=0,
            output_field=models.IntegerField(),
        ),
    )

    # Sort by concatenated_values_order then
    # Sort each group of products (0, 1, 2, ...) per attribute values
    # Sort each group of products by name,
    # if they have the same values or not values
    qs = qs.order_by("concatenated_values_order", "concatenated_values",
                     "name")

    # Descending sorting
    if not ascending:
        return qs.reverse()
    return qs
def between_include_start(column, start, end, value=1):
    """When-clause matching start <= column < end, yielding *value*."""
    bounds = Q(**{
        column + '__gte': start,
        column + '__lt': end
    })
    return When(bounds, then=value)
def _get_queryset(self, cl, form_data):
    """Build the OrderPosition queryset for check-in list *cl*, applying the
    product/subevent limits of the list and the date/sort/attention filters
    from *form_data*.
    """
    # Latest check-in timestamp per position on this list (correlated
    # subquery template; specialized for entries and exits below).
    cqs = Checkin.objects.filter(
        position_id=OuterRef('pk'),
        list_id=cl.pk).order_by().values('position_id').annotate(
        m=Max('datetime')).values('m')
    cqsin = cqs.filter(type=Checkin.TYPE_ENTRY)
    cqsout = cqs.filter(type=Checkin.TYPE_EXIT)

    qs = OrderPosition.objects.filter(order__event=self.event, ).annotate(
        last_checked_in=Subquery(cqsin),
        last_checked_out=Subquery(cqsout),
        auto_checked_in=Exists(
            Checkin.objects.filter(
                position_id=OuterRef('pk'),
                list_id=cl.pk,
                auto_checked_in=True))).prefetch_related(
        'answers', 'answers__question', 'addon_to__answers',
        'addon_to__answers__question').select_related(
        'order', 'item', 'variation', 'addon_to', 'order__invoice_address',
        'voucher', 'seat')

    # Restrict to the list's allowed products / subevent, if limited.
    if not cl.all_products:
        qs = qs.filter(
            item__in=cl.limit_products.values_list('id', flat=True))
    if cl.subevent:
        qs = qs.filter(subevent=cl.subevent)

    # Optional date-range filter on the subevent start, interpreted as local
    # midnight in the event's timezone (date_to is exclusive: < next day).
    if form_data.get('date_from'):
        dt = make_aware(
            datetime.combine(
                dateutil.parser.parse(form_data['date_from']).date(),
                time(hour=0, minute=0, second=0)), self.event.timezone)
        qs = qs.filter(subevent__date_from__gte=dt)
    if form_data.get('date_to'):
        dt = make_aware(
            datetime.combine(
                dateutil.parser.parse(form_data['date_to']).date() +
                timedelta(days=1), time(hour=0, minute=0, second=0)),
            self.event.timezone)
        qs = qs.filter(subevent__date_from__lt=dt)

    # Secondary ordering prefix: group by subevent when the list spans them.
    o = ()
    if self.event.has_subevents and not cl.subevent:
        o = ('subevent__date_from', 'subevent__name')

    sort = form_data.get('sort') or 'name'
    if sort == 'name':
        # Fall back through attendee name -> add-on parent's name ->
        # invoice-address name -> order code.
        qs = qs.order_by(
            *o,
            Coalesce(
                NullIf('attendee_name_cached', Value('')),
                NullIf('addon_to__attendee_name_cached', Value('')),
                NullIf('order__invoice_address__name_cached', Value('')),
                'order__code'))
    elif sort == 'code':
        qs = qs.order_by(*o, 'order__code')
    elif sort.startswith('name:'):
        # Sort by a single component of the structured name parts
        # (e.g. 'name:family_name'); `__ne` is a custom lookup — not a
        # built-in Django lookup, presumably registered by this project.
        part = sort[5:]
        qs = qs.annotate(resolved_name=Case(
            When(attendee_name_cached__ne='', then='attendee_name_parts'),
            When(addon_to__attendee_name_cached__isnull=False,
                 addon_to__attendee_name_cached__ne='',
                 then='addon_to__attendee_name_parts'),
            default='order__invoice_address__name_parts',
        )).annotate(resolved_name_part=JSONExtract(
            'resolved_name', part)).order_by(*o, 'resolved_name_part')

    if form_data.get('attention_only'):
        qs = qs.filter(
            Q(item__checkin_attention=True)
            | Q(order__checkin_attention=True))

    # Paid orders only, unless the list explicitly includes pending ones.
    if not cl.include_pending:
        qs = qs.filter(order__status=Order.STATUS_PAID)
    else:
        qs = qs.filter(order__status__in=(Order.STATUS_PAID,
                                          Order.STATUS_PENDING))

    return qs
def query(self, request):
    """Return a paged table of SQL-order records visible to the current user.

    Visibility: rows where the user is the proposer or the auditor.
    Each row is annotated with a human-readable progress label/color and
    augmented with the user's permission list (used by the frontend to
    decide column visibility).
    :return: dict ``{'total': <count>, 'rows': [<record dict>, ...]}``
    """
    cdata = self.cleaned_data
    limit_size = cdata.get('limit_size')
    offset_size = cdata.get('offset_size')
    search_content = cdata.get('search_content')

    # Fetch the user's permissions; the frontend uses them for column display.
    role_name = request.user.user_role()
    perm_list = list(
        RolePermission.objects.filter(role__role_name=role_name).values_list('permission_name', flat=True))
    permissions = {'permissions': perm_list}

    query = SqlOrdersContents.objects.filter(
        Q(proposer=request.user.username) | Q(auditor=request.user.username)).annotate(
        progress_value=Case(
            When(progress='0', then=Value('待批准')),
            When(progress='1', then=Value('未批准')),
            When(progress='2', then=Value('已批准')),
            When(progress='3', then=Value('处理中')),
            When(progress='4', then=Value('已完成')),
            When(progress='5', then=Value('已关闭')),
            When(progress='6', then=Value('已勾住')),
            output_field=CharField(),
        ),
        progress_color=Case(
            When(progress__in=('0',), then=Value('btn-primary')),
            When(progress__in=('2',), then=Value('btn-warning')),
            When(progress__in=('1', '5'), then=Value('btn-danger')),
            When(progress__in=('3',), then=Value('btn-info')),
            When(progress__in=('4',), then=Value('btn-success')),
            When(progress__in=('6',), then=Value('btn-default')),
            output_field=CharField(),
        ),
    )
    if search_content:
        # FIX: the original OR-ed Q(host__icontains=...) twice; deduplicated
        # (identical result set).
        obj = query.filter(Q(task_version__icontains=search_content) |
                           Q(title__icontains=search_content) |
                           Q(proposer__icontains=search_content) |
                           Q(host__icontains=search_content) |
                           Q(database__icontains=search_content) |
                           Q(contents__icontains=search_content))
    else:
        obj = query

    ol_total = obj.count()

    # NOTE(review): the slice treats limit_size as an absolute end index
    # ([offset:limit]); if it is actually a page size, this should be
    # [offset_size:offset_size + limit_size] — confirm against the caller.
    ol_records = obj.values('progress_color', 'task_version', 'host', 'port', 'sql_type', 'database',
                            'progress_value', 'id', 'envi_id', 'title', 'proposer', 'auditor',
                            'created_at', 'remark'
                            ).order_by('-created_at')[offset_size:limit_size]

    rows = []
    for x in list(ol_records):
        x.update(permissions)
        rows.append(x)
    result = {'total': ol_total, 'rows': rows}
    return result
def search(request, **kwargs):
    """Generic select2-style autocomplete endpoint.

    Dispatches on ``?query=`` to one of several lookups (timezones,
    resources, accounts, organizations, teams, countries, upcoming
    contests, statistic fields) and returns ``{"items": [...], "more":
    <bool>}`` as JSON, paginated by ``?page=`` / ``?count=``.

    :param request: HTTP request; lookup parameters are read from GET.
    :param kwargs: may contain ``event`` to scope the ``team`` lookup.
    :return: JSON ``HttpResponse`` (or ``HttpResponseBadRequest`` /
        ``JsonResponse`` for the early-exit branches).
    """
    query = request.GET.get('query', None)
    if not query or not isinstance(query, str):
        return HttpResponseBadRequest('invalid query')

    count = int(request.GET.get('count', 10))
    page = int(request.GET.get('page', 1))

    if query == 'timezones':
        ret = {}
        for tz in get_timezones():
            ret[tz["name"]] = f'{tz["name"]} {tz["repr"]}'
        return JsonResponse(ret)
    elif query == 'resources-for-add-account':
        coder = request.user.coder
        coder_accounts = coder.account_set.filter(resource=OuterRef('pk'))
        # A resource is disabled when it has no module, or when the coder
        # already has an account on it and multi-accounts are not allowed.
        qs = Resource.objects \
            .annotate(has_coder_account=Exists(coder_accounts)) \
            .annotate(has_multi=F('module__multi_account_allowed')) \
            .annotate(disabled=Case(
                When(module__isnull=True, then=Value(True)),
                When(has_coder_account=True, has_multi=False, then=Value(True)),
                default=Value(False),
                output_field=BooleanField(),
            ))
        if 'regex' in request.GET:
            qs = qs.filter(get_iregex_filter(request.GET['regex'], 'host'))
        qs = qs.order_by('disabled', 'pk')
        total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = [{
            'id': r.id,
            'text': r.host,
            'disabled': r.disabled,
        } for r in qs]
    elif query == 'accounts-for-add-account':
        coder = request.user.coder
        qs = Account.objects.all()
        resource = request.GET.get('resource')
        if resource:
            qs = qs.filter(resource__id=int(resource))
        else:
            qs = qs.select_related('resource')
        if 'user' in request.GET:
            qs = qs.filter(
                get_iregex_filter(request.GET.get('user'), 'key', 'name'))
        # BUG FIX: the original glued the next two statements together with
        # a stray line continuation ("\"), which is a syntax error.
        qs = qs.annotate(has_multi=F('resource__module__multi_account_allowed'))
        qs = qs.annotate(disabled=Case(
            When(coders=coder, then=Value(True)),
            When(coders__isnull=False, has_multi=False, then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        ))
        qs = qs.order_by('disabled')
        total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = []
        for r in qs:
            fields = {
                'id': r.key,
                'text': f'{r.key} - {r.name}' if r.name and r.key.find(r.name) == -1 else r.key,
                'disabled': r.disabled,
            }
            if not resource:
                # No resource filter: disambiguate accounts by host.
                fields['text'] += f', {r.resource.host}'
                fields['resource'] = {'id': r.resource.pk, 'text': r.resource.host}
            ret.append(fields)
    elif query == 'organization':
        qs = Organization.objects.all()
        name = request.GET.get('name')
        if name:
            qs = qs.filter(Q(name__icontains=name) |
                           Q(name_ru__icontains=name) |
                           Q(abbreviation__icontains=name))
        total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = [{'id': o.name, 'text': o.name} for o in qs]
    elif query == 'team':
        qs = Team.objects.all()
        name = request.GET.get('name')
        if name:
            qs = qs.filter(name__icontains=name)
        event = kwargs.get('event')
        if event:
            qs = qs.filter(event=event)
        # Only NEW teams are selectable; the rest are shown disabled.
        qs = qs.annotate(disabled=Case(
            When(status=TeamStatus.NEW, then=Value(False)),
            default=Value(True),
            output_field=BooleanField(),
        )).order_by('disabled', '-modified')
        total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = [{'id': r.id, 'text': r.name, 'disabled': r.disabled} for r in qs]
    elif query == 'country':
        qs = list(countries)
        name = request.GET.get('name')
        if name:
            name = name.lower()
            qs = [(c, n) for c, n in countries if name in n.lower()]
        total = len(qs)
        qs = qs[(page - 1) * count:page * count]
        ret = [{'id': c, 'text': n} for c, n in qs]
    elif query == 'notpast':
        title = request.GET.get('title')
        qs = Contest.objects.filter(title__iregex=verify_regex(title),
                                    end_time__gte=timezone.now())
        total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = [{'id': c.id, 'text': c.title} for c in qs]
    elif query == 'field-to-select':
        contest = get_object_or_404(Contest, pk=request.GET.get('cid'))
        text = request.GET.get('text')
        field = request.GET.get('field')
        # Guards against ORM lookup traversal via the user-supplied field.
        # NOTE(review): ``assert`` is stripped under ``-O``; consider raising
        # a proper error instead.
        assert '__' not in field
        if field == 'languages':
            qs = contest.info.get('languages', [])
            # Wrap each value in a one-element list so the shared
            # ``f[0]`` extraction below works for both branches.
            qs = [[q] for q in qs if not text or text.lower() in q.lower()]
            total = len(qs)
        else:
            field = f'addition__{field}'
            qs = contest.statistics_set
            if text:
                qs = qs.filter(**{f'{field}__icontains': text})
            qs = qs.distinct(field).values_list(field)
            total = qs.count()
        qs = qs[(page - 1) * count:page * count]
        ret = [{'id': f[0], 'text': f[0]} for f in qs]
    else:
        return HttpResponseBadRequest('invalid query')

    result = {
        'items': ret,
        # BUG FIX: the original used "<=", which reported a spurious extra
        # page whenever the total was an exact multiple of the page size.
        'more': page * count < total,
    }
    return HttpResponse(json.dumps(result, ensure_ascii=False),
                        content_type="application/json")
def get_queryset(self):
    """Build the borrower listing queryset with per-record annotations.

    Prefetches individual/business sub-records (with their lookup-table
    descriptions pre-annotated), documents, loans and document movements,
    then annotates display fields (name, type, address, contact number,
    area code, TIN) resolved from ``recordType`` ("BD" = business,
    "ID" = individual).

    Supported query parameters: ``borrowerId``, ``branch``,
    ``loanProgramId``, ``totalAvailmentsFrom/To``,
    ``totalOutstandingBalanceFrom/To``, ``totalPaymentsFrom/To``,
    ``clientSinceFrom/To``.

    Aggregate figures are attached to each borrower (and each of its
    loans) as plain instance attributes for the serializer to pick up;
    this relies on the queryset's result cache so the same instances are
    reused downstream.
    """
    queryset = (Borrower.objects.prefetch_related(
        Prefetch(
            "individual",
            queryset=Individual.objects.annotate(
                _titleName=F("title__description"),
                _genderName=F("gender__description"),
                _countryOfBirthName=F("countryOfBirth__description"),
                _nationalityName=F("nationality__description"),
                _maritalStatusName=F("maritalStatus__description"),
                _religionName=F("religion__name"),
            ).all(),
        ),
        Prefetch(
            "business",
            queryset=Business.objects.annotate(
                _nationalityName=F("nationality__description"),
                _legalFormName=F("legalForm__description"),
                _psicName=F("psic__description"),
                _firmSizeName=F("firmSize__description"),
            ).all(),
        ),
        "borrowerAttachments",
        Prefetch("documents",
                 queryset=Document.objects.order_by("dateCreated")),
        Prefetch("loans",
                 queryset=Loan.objects.order_by("dateReleased")),
        Prefetch("documents__documentMovements",
                 queryset=DocumentMovement.objects.order_by("-dateCreated")),
    ).annotate(
        borrowerName=Case(
            When(Q(recordType="BD"), then=F("business__tradeName")),
            When(
                Q(recordType="ID"),
                then=Concat(
                    F("individual__firstname"), V(" "),
                    F("individual__middlename"), V(" "),
                    F("individual__lastname"),
                ),
            ),
        ),
        borrowerType=Case(
            When(recordType="BD", then=V("Business")),
            When(recordType="ID", then=V("Individual")),
            output_field=models.CharField(),
        ),
        borrowerAddress=Case(
            When(
                Q(recordType="BD"),
                then=Concat(
                    F("business__businessAddress__streetNo"), V(" "),
                    F("business__businessAddress__barangay"), V(" "),
                    F("business__businessAddress__city"), V(" "),
                    F("business__businessAddress__province"),
                ),
            ),
            When(
                Q(recordType="ID"),
                then=Concat(
                    F("individual__individualAddress__streetNo"), V(" "),
                    F("individual__individualAddress__barangay"), V(" "),
                    F("individual__individualAddress__city"), V(" "),
                    F("individual__individualAddress__province"),
                ),
            ),
        ),
        contactPersonNumber=Case(
            When(
                Q(recordType="BD"),
                then=F("business__businessContactPerson__phoneNo"),
            ),
        ),
        areaCode=Case(
            When(
                Q(recordType="BD"),
                then=F("area__branchCode"),
            ),
        ),
        tin=Case(
            # Identification type value "10" appears to select the TIN row —
            # TODO confirm against the identification-type lookup table.
            When(
                Q(recordType="BD")
                & Q(business__businessIdentification__identificationType__value="10"),
                then=F("business__businessIdentification__identificationNumber"),
            ),
            When(
                Q(recordType="ID")
                & Q(individual__individualIdentification__identificationType__value="10"),
                then=F("individual__individualIdentification__identificationNumber"),
            ),
        ),
    ).exclude(isDeleted=True).order_by("-borrowerId"))

    params = self.request.query_params
    borrowerId = params.get("borrowerId", None)
    branch = params.get("branch", None)
    loanProgramId = params.get("loanProgramId", None)
    totalAvailmentsFrom = params.get("totalAvailmentsFrom", None)
    totalAvailmentsTo = params.get("totalAvailmentsTo", None)
    totalOutstandingBalanceFrom = params.get("totalOutstandingBalanceFrom", None)
    totalOutstandingBalanceTo = params.get("totalOutstandingBalanceTo", None)
    totalPaymentsFrom = params.get("totalPaymentsFrom", None)
    totalPaymentsTo = params.get("totalPaymentsTo", None)
    clientSinceFrom = params.get("clientSinceFrom", None)
    clientSinceTo = params.get("clientSinceTo", None)

    if borrowerId is not None:
        queryset = queryset.filter(borrowerId=borrowerId)
    if branch is not None:
        queryset = queryset.filter(area=branch)

    # The range filters below cannot be expressed in SQL (the totals come
    # from model methods), so the queryset is evaluated and filtered in
    # Python, then narrowed again by primary key.  This is O(n) queries per
    # filter — acceptable for small datasets, a candidate for optimization
    # otherwise.
    if totalAvailmentsFrom is not None and totalAvailmentsTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.totalAvailments = borrower.getTotalAvailments()
            if (int(borrower.totalAvailments) >= int(totalAvailmentsFrom)) and (
                    int(borrower.totalAvailments) <= int(totalAvailmentsTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    if totalOutstandingBalanceFrom is not None and totalOutstandingBalanceTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance()
            if (int(borrower.totalOutstandingBalance) >=
                    int(totalOutstandingBalanceFrom)) and (
                    int(borrower.totalOutstandingBalance) <=
                    int(totalOutstandingBalanceTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    if totalPaymentsFrom is not None and totalPaymentsTo is not None:
        borrowers = []
        for borrower in queryset:
            borrower.payments = borrower.getPayments()
            borrower.totalPayments = borrower.getTotalPayments()
            if (int(borrower.totalPayments) >= int(totalPaymentsFrom)) and (
                    int(borrower.totalPayments) <= int(totalPaymentsTo)):
                borrowers.append(borrower.pk)
        queryset = queryset.filter(borrowerId__in=borrowers)

    if clientSinceFrom is not None and clientSinceTo is not None:
        queryset = queryset.filter(
            accreditationDate__gte=clientSinceFrom).filter(
            accreditationDate__lte=clientSinceTo)

    # Enrich every surviving borrower (and its loans) with computed totals.
    for borrower in queryset:
        borrower.totalAvailments = borrower.getTotalAvailments()
        borrower.totalOutstandingBalance = borrower.getTotalOutstandingBalance()
        borrower.payments = borrower.getPayments()
        borrower.totalPayments = borrower.getTotalPayments()
        for loan in borrower.loans.all():
            # NOTE(review): the next two attributes are assigned without
            # calling — presumably properties on Loan (or a missing "()");
            # confirm against the Loan model before changing.
            loan.totalAmortizationInterest = loan.getTotalAmortizationInterest
            loan.totalPayment = loan.getTotalPayment
            loan.totalPrincipalPayment = loan.getTotalPrincipalPayment()
            # "getTotaInterestPayment" (sic) is the model's actual method name.
            loan.totalInterestPayment = loan.getTotaInterestPayment()
            loan.totalAccruedInterestPayment = loan.getTotalAccruedInterestPayment()
            loan.totalTotalInterestPayment = loan.getTotalTotalInterestPayment()
            loan.totalPenaltyPayment = loan.getTotalPenaltyPayment()
            loan.totalAdditionalInterestPayment = loan.getTotalAdditionalInterestPayment()
            loan.lastAmortizationItem = loan.getLastAmortizationItem()
            loan.latestAmortization = loan.getLatestAmortization()
            if loan.latestAmortization:
                loan.latestAmortization.totalAmortizationPrincipal = (
                    loan.latestAmortization.getTotalAmortizationPrincipal())
                # BUG FIX: removed a leftover debug print() of the value above.
        if loanProgramId is not None:
            borrower.totalAvailmentPerProgram = borrower.getTotalAvailmentsPerProgram(
                loanProgramId)

    return queryset
def detail_asset_view(request, asset_id):
    """Render the asset detail page with findings, stats and scan info.

    Aggregates finding counts by severity/status/engine, collects external
    vulnerability references, computes per-engine-scope severity counters
    and the asset's current and historical risk grades.

    :param request: the current HTTP request.
    :param asset_id: primary key of the ``Asset`` to display (404 if absent).
    :return: rendered ``details-asset.html`` response.
    """
    asset = get_object_or_404(Asset, id=asset_id)

    # Order findings critical -> info; unknown severities sort with "high".
    findings = Finding.objects.filter(asset=asset).annotate(
        severity_numm=Case(
            When(severity="critical", then=Value("0")),
            When(severity="high", then=Value("1")),
            When(severity="medium", then=Value("2")),
            When(severity="low", then=Value("3")),
            When(severity="info", then=Value("4")),
            default=Value("1"),
            output_field=CharField())
    ).annotate(
        scope_list=ArrayAgg('scopes__name')
    ).order_by('severity_numm', 'type', 'updated_at')

    findings_stats = {'total': 0, 'critical': 0, 'high': 0, 'medium': 0,
                      'low': 0, 'info': 0, 'new': 0, 'ack': 0, 'cvss_gte_7': 0}
    engines_stats = {}
    references = {}

    # Pre-seed a zeroed severity counter per engine policy scope.
    engine_scopes = {}
    for engine_scope in EnginePolicyScope.objects.all():
        engine_scopes.update({
            engine_scope.name: {
                'priority': engine_scope.priority,
                'id': engine_scope.id,
                'total': 0, 'critical': 0, 'high': 0,
                'medium': 0, 'low': 0, 'info': 0
            }
        })

    for finding in findings:
        findings_stats['total'] = findings_stats.get('total', 0) + 1
        findings_stats[finding.severity] = findings_stats.get(finding.severity, 0) + 1
        if finding.status == 'new':
            findings_stats['new'] = findings_stats.get('new', 0) + 1
        if finding.status == 'ack':
            findings_stats['ack'] = findings_stats.get('ack', 0) + 1
        for fs in finding.scope_list:
            if fs is not None:
                c = engine_scopes[fs]
                engine_scopes[fs].update({
                    'total': c['total'] + 1,
                    finding.severity: c[finding.severity] + 1
                })
        if finding.engine_type not in engines_stats.keys():
            engines_stats.update({finding.engine_type: 0})
        engines_stats[finding.engine_type] = engines_stats.get(finding.engine_type, 0) + 1
        # BUG FIX: the counter is named "cvss_gte_7" but the original used a
        # strict ">" comparison; ">=" matches the intended semantics.
        if finding.risk_info["cvss_base_score"] >= 7.0:
            findings_stats['cvss_gte_7'] = findings_stats.get('cvss_gte_7', 0) + 1
        # Merge this finding's external references per source key.
        if bool(finding.vuln_refs):
            for ref in finding.vuln_refs.keys():
                if ref not in references.keys():
                    references.update({ref: []})
                tref = references[ref]
                if type(finding.vuln_refs[ref]) is list:
                    tref = tref + finding.vuln_refs[ref]
                else:
                    tref.append(finding.vuln_refs[ref])
                # BUG FIX: the original wrote references[ref] twice in a row
                # (the deduplicated value was immediately overwritten); one
                # assignment suffices — deduplication happens below.
                references[ref] = tref

    # Show only unique references
    references_cleaned = {}
    for ref in references:
        references_cleaned.update({ref: sorted(list(set(references[ref])))})

    # Related scans
    scans_stats = {
        'performed': Scan.objects.filter(assets__in=[asset]).count(),
        'defined': ScanDefinition.objects.filter(assets_list__in=[asset]).count(),
        'periodic': ScanDefinition.objects.filter(assets_list__in=[asset], scan_type='periodic').count(),
        'ondemand': ScanDefinition.objects.filter(assets_list__in=[asset], scan_type='single').count(),
        'running': Scan.objects.filter(assets__in=[asset], status='started').count(),  # bug: should be regrouped per asset
        'lasts': Scan.objects.filter(assets__in=[asset]).order_by('-created_at')[:3]
    }

    asset_groups = list(AssetGroup.objects.filter(assets__in=[asset]).only("id"))
    scan_defs = ScanDefinition.objects.filter(
        Q(assets_list__in=[asset]) | Q(assetgroups_list__in=asset_groups)
    ).annotate(engine_type_name=F('engine_type__name')).annotate(scan_set_count=Count('scan'))
    scans = Scan.objects.filter(assets__in=[asset]).values(
        "id", "title", "status", "summary", "updated_at"
    ).annotate(engine_type_name=F('engine_type__name'))

    # Investigation links (template substitution of the asset value).
    investigation_links = []
    DEFAULT_LINKS = copy.deepcopy(ASSET_INVESTIGATION_LINKS)
    for i in DEFAULT_LINKS:
        if asset.type in i["datatypes"]:
            i["link"] = i["link"].replace("%asset%", asset.value)
            investigation_links.append(i)

    # Calculate risk grade automatically (current + historical snapshots).
    asset.calc_risk_grade()
    asset_risk_grade = {
        'now': asset.get_risk_grade(),
        'day_ago': asset.get_risk_grade(history=1),
        'week_ago': asset.get_risk_grade(history=7),
        'month_ago': asset.get_risk_grade(history=30),
        'year_ago': asset.get_risk_grade(history=365)
    }

    return render(request, 'details-asset.html', {
        'asset': asset,
        'asset_risk_grade': asset_risk_grade,
        'findings': findings,
        'findings_stats': findings_stats,
        'references': references_cleaned,
        'scans_stats': scans_stats,
        'scans': scans,
        'scan_defs': scan_defs,
        'investigation_links': investigation_links,
        'engines_stats': engines_stats,
        # BUG FIX: Python 2 leftovers — dict.iteritems() and a
        # tuple-unpacking lambda parameter are errors on Python 3.
        'asset_scopes': sorted(engine_scopes.items(),
                               key=lambda kv: kv[1]['priority'])
    })
def query(self, request):
    """Return a paginated listing of the current user's work orders.

    Only orders where the user is the applicant are included.  Each row
    is annotated with a progress label (``progress_value``), a CSS class
    (``progress_color``), a ``task_version`` (empty string when no
    version is linked) and the environment name (``envi_name``).

    Rows are sliced as ``[offset_size:limit_size]`` — absolute slice
    bounds supplied by the caller.

    :param request: the current HTTP request (``request.user`` is used).
    :return: dict ``{'total': <int>, 'rows': <values queryset>}``.
    """
    data = self.validated_data
    limit_size = data.get('limit_size')
    offset_size = data.get('offset_size')
    search_content = data.get('search_content')

    # Progress code -> display label.
    label_whens = [
        When(progress=code, then=Value(label))
        for code, label in (
            ('0', '待批准'), ('1', '未批准'), ('2', '已批准'),
            ('3', '处理中'), ('4', '已完成'), ('5', '已关闭'),
            ('6', '已复核'), ('7', '已勾住'),
        )
    ]
    # Progress code(s) -> CSS button class.
    color_whens = [
        When(progress__in=codes, then=Value(css))
        for codes, css in (
            (('0',), 'btn-primary'), (('2',), 'btn-warning'),
            (('1', '5'), 'btn-danger'), (('3',), 'bg-navy'),
            (('4',), 'btn-success'), (('6',), 'bg-purple'),
            (('7',), 'btn-default'),
        )
    ]

    base = Orders.objects.filter(
        applicant=request.user.username
    ).annotate(
        progress_value=Case(*label_whens, output_field=CharField()),
        progress_color=Case(*color_whens, output_field=CharField()),
        task_version=Case(
            When(version__version__isnull=True, then=Value('')),
            When(version__version__isnull=False, then=F('version__version')),
            output_field=CharField(),
        ),
        envi_name=F('envi__envi_name'),
    )

    if search_content:
        # OR the search term across every searchable column.
        criteria = Q(version__version__icontains=search_content)
        for column in ('title', 'applicant', 'auditor', 'reviewer',
                       'host', 'database', 'contents'):
            criteria |= Q(**{f'{column}__icontains': search_content})
        base = base.filter(criteria)

    total = base.count()
    rows = base.values(
        'id', 'envi_id', 'envi_name', 'task_version', 'host', 'port',
        'database', 'sql_type', 'title', 'progress_value',
        'progress_color', 'remark', 'applicant', 'auditor', 'reviewer',
        'created_at'
    ).order_by('-created_at')[offset_size:limit_size]

    return {'total': total, 'rows': rows}