def third_party_costs_by_month(date_range):
    """Accumulate third-party costs per (year, month) inside *date_range*.

    Sums come from two sources: logged costs not yet attached to an invoice
    service, and invoiced amounts (excluding down payments). Both sums are
    SUBTRACTED, so the returned mapping holds costs as negative amounts.
    NOTE(review): Z2 is presumably a zero Decimal constant (e.g.
    Decimal("0.00")) defined elsewhere in this module — confirm.
    """
    costs = defaultdict(lambda: Z2)
    # Logged third-party costs without an invoice service, grouped by month.
    # order_by() clears default ordering so values()/annotate() group cleanly.
    for result in (LoggedCost.objects.filter(
            rendered_on__range=date_range,
            third_party_costs__isnull=False,
            invoice_service__isnull=True,
    ).order_by().annotate(
            year=ExtractYear("rendered_on"),
            month=ExtractMonth("rendered_on")).values(
            "year", "month").annotate(Sum("third_party_costs"))):
        costs[(result["year"], result["month"])] -= result["third_party_costs__sum"]
    # Invoiced third-party costs (down payments excluded, zero amounts skipped),
    # grouped by invoice month.
    for result in (Invoice.objects.invoiced().filter(~Q(
            type=Invoice.DOWN_PAYMENT)).order_by().filter(
            Q(invoiced_on__range=date_range),
            ~Q(third_party_costs=Z2)).annotate(
            year=ExtractYear("invoiced_on"),
            month=ExtractMonth("invoiced_on")).values(
            "year", "month").annotate(Sum("third_party_costs"))):
        costs[(result["year"], result["month"])] -= result["third_party_costs__sum"]
    return costs
def test_max_per_year(self):
    """
    Find the maximum salary awarded in the same year as the employee was
    hired, regardless of the department.
    """
    # Window partitioned by hire year: Max('salary') yields the top salary
    # among everyone hired in that year, attached to every row of the partition.
    qs = Employee.objects.annotate(max_salary_year=Window(
        expression=Max('salary'),
        order_by=ExtractYear('hire_date').asc(),
        partition_by=ExtractYear('hire_date'))).order_by(
            ExtractYear('hire_date'), 'salary')
    self.assertQuerysetEqual(
        qs,
        [
            ('Jones', 'Accounting', 45000, 2005, 100000),
            ('Johnson', 'Management', 80000, 2005, 100000),
            ('Miller', 'Management', 100000, 2005, 100000),
            ('Smith', 'Sales', 55000, 2007, 55000),
            ('Jenson', 'Accounting', 45000, 2008, 45000),
            ('Williams', 'Accounting', 37000, 2009, 53000),
            ('Smith', 'Marketing', 38000, 2009, 53000),
            ('Brown', 'Sales', 53000, 2009, 53000),
            ('Wilkinson', 'IT', 60000, 2011, 60000),
            ('Johnson', 'Marketing', 40000, 2012, 40000),
            ('Moore', 'IT', 34000, 2013, 50000),
            ('Adams', 'Accounting', 50000, 2013, 50000),
        ],
        lambda row: (row.name, row.department, row.salary,
                     row.hire_date.year, row.max_salary_year))
def calculate_procent(self, obj):
    """Annotate each row of *obj* with a completion percentage ('procent').

    If the card tracks time (its 'run' change is 0), the percentage is
    elapsed days over remaining time; otherwise it is distance run over
    total run. Rows matching neither case get 0.0.
    NOTE(review): elapsed days use the rough approximation of 31 days per
    month and 365 days per year — confirm this precision is acceptable.
    """
    run_current = "user__cards__run"
    expression = "user__cards__cards_user__card"
    date = expression + "__date"
    # Condition marking time-based cards (no distance change recorded).
    run = Q(user__cards__cards_user__card__change__run=0)
    time_remained = expression + "__change__time"
    run_remained = expression + "__change__run"
    run_initial = expression + "__change__initial_run"
    return obj.annotate(
        now_days=ExtractDay(Now()),
        now_month=ExtractMonth(Now()),
        now_year=ExtractYear(Now()),
        days=ExtractDay(date),
        month=ExtractMonth(date),
        year=ExtractYear(date),
    ).annotate(
        # Approximate number of days since the card's date (31/month, 365/year).
        passed_days=ExpressionWrapper(
            F('now_days') - F('days') + 31 * (F('now_month') - F('month')) +
            365 * (F('now_year') - F('year')),
            output_field=FloatField()),
        run_total=F(run_remained) - F(run_initial),
        run_passed=F(run_current) - F(run_initial),
    ).annotate(
        procent=Case(
            # Time-based progress: days elapsed vs. time remaining.
            When(Q(passed_days__gt=0) & run,
                 then=ExpressionWrapper(
                     F('passed_days') / F(time_remained) * 100,
                     output_field=FloatField())),
            # Distance-based progress: run so far vs. total run.
            When(~Q(run_passed__lte=0),
                 then=ExpressionWrapper(
                     F('run_passed') / F('run_total') * 100,
                     output_field=FloatField())),
            default=0.0,
        ),
    )
def test_extract_year_func(self):
    """ExtractYear works on both DateTimeField and DateField annotations,
    and an ExtractYear expression is accepted by the __year lookup."""
    start_datetime = microsecond_support(
        datetime(2015, 6, 15, 14, 30, 50, 321))
    end_datetime = microsecond_support(
        datetime(2016, 6, 15, 14, 10, 50, 123))
    if settings.USE_TZ:
        start_datetime = timezone.make_aware(start_datetime, is_dst=False)
        end_datetime = timezone.make_aware(end_datetime, is_dst=False)
    # Two rows with the datetimes swapped so both years appear in each column.
    self.create_model(start_datetime, end_datetime)
    self.create_model(end_datetime, start_datetime)
    self.assertQuerysetEqual(
        DTModel.objects.annotate(extracted=ExtractYear(
            'start_datetime')).order_by('start_datetime'),
        [(start_datetime, start_datetime.year),
         (end_datetime, end_datetime.year)],
        lambda m: (m.start_datetime, m.extracted))
    self.assertQuerysetEqual(
        DTModel.objects.annotate(extracted=ExtractYear(
            'start_date')).order_by('start_datetime'),
        [(start_datetime, start_datetime.year),
         (end_datetime, end_datetime.year)],
        lambda m: (m.start_datetime, m.extracted))
    # Every row trivially satisfies start_datetime__year == its own year.
    self.assertEqual(
        DTModel.objects.filter(
            start_datetime__year=ExtractYear('start_datetime')).count(), 2)
def _sum_queryset(self, field, queryset):
    """Group *queryset* by the month of *field* and annotate each month with
    effective/real expense, income and total sums.

    'Effective' sums cover rows whose due date falls in the grouped month;
    'real' sums additionally require a payment date in that month.

    Fix: the original bound lambdas to names (PEP 8 E731); they are now
    inner ``def`` functions, and the trivial ``*_kind`` wrappers are inlined.
    """
    def sum_when(**kwargs):
        # Sum 'value' over rows matching the filters; 0 when nothing matches.
        return Coalesce(
            Sum(Case(When(then=F('value'), **kwargs), default=0)), 0)

    def sum_effective(**kwargs):
        # Rows due in the same month/year as the truncated 'date'.
        return sum_when(**kwargs,
                        due_date__month=ExtractMonth('date'),
                        due_date__year=ExtractYear('date'))

    def sum_real(**kwargs):
        # Rows actually paid, with the payment date in the grouped month/year.
        return sum_when(**kwargs,
                        payment_date__isnull=False,
                        payment_date__month=ExtractMonth('date'),
                        payment_date__year=ExtractYear('date'))

    # TruncMonth keeps the year component, so grouping on 'date' effectively
    # groups by (year, month).
    return queryset.annotate(date=TruncMonth(field))\
        .values('date')\
        .annotate(
            effective_expenses=sum_effective(kind=HasKind.EXPENSE_KIND),
            effective_incomes=sum_effective(kind=HasKind.INCOME_KIND),
            real_expenses=sum_real(kind=HasKind.EXPENSE_KIND),
            real_incomes=sum_real(kind=HasKind.INCOME_KIND),
            effective_total=sum_effective(),
            real_total=sum_real()
        )
def get_individuals_affected_data():
    """Build chart series of individuals affected per breach type per year.

    Returns:
        (years, series): sorted list of years, and a list of
        {'name': breach_type, 'data': [totals aligned with years]} dicts.

    Cleanups: redundant ``sorted(list(set(...)))`` reduced to
    ``sorted(set(...))`` and the manual series-building loop replaced with
    comprehensions. NOTE(review): assumes Sum('individuals_affected') is
    never None for the grouped rows — confirm the column is non-nullable.
    """
    years_qs = Breach.objects.annotate(year=ExtractYear('date_created'))\
        .values('year').distinct()
    years = sorted(qe['year'] for qe in years_qs)
    # breach_type holds comma-separated values; split and deduplicate.
    breach_types_qs = Breach.objects.values('breach_type').distinct()
    breach_types = sorted({
        breach_type.strip()
        for qe in breach_types_qs
        for breach_type in qe['breach_type'].split(',')
    })
    indiv_affec_qs = Breach.objects.annotate(
        p_breach_type=Trim('breach_type'),
        p_year=ExtractYear('date_created'),)\
        .values('p_breach_type', 'p_year')\
        .annotate(total=Sum('individuals_affected'))
    # Zero-filled matrix: breach type -> {year -> total}.
    indiv_affec = {breach_type: dict.fromkeys(years, 0)
                   for breach_type in breach_types}
    for qe in indiv_affec_qs:
        # A grouped row may cover several comma-separated breach types;
        # the total is credited to each of them.
        for breach_type in qe['p_breach_type'].split(','):
            indiv_affec[breach_type.strip()][qe['p_year']] += qe['total']
    series = [
        {'name': breach_type,
         'data': [indiv_affec[breach_type][year] for year in years]}
        for breach_type in breach_types
    ]
    return years, series
def GenderDataView(request):
    """Render group statistics broken down by gender and by debut year."""
    # Per-gender aggregates: group count, video/member/view sums.
    gender = Group.objects.all().values('gender').annotate(
        g=Count('id'), v=Sum('video_count'), i=Sum('current_member_count'),
        views=Sum('total_view_count')).values('gender', 'g', 'v', 'i', 'views')
    # Replace single-letter gender codes with display labels.
    # NOTE(review): this relies on the enumerate() pass filling the queryset's
    # result cache so the gender[i][...] writes mutate the cached dicts rather
    # than hitting the database again — confirm before refactoring.
    for i, g in enumerate(gender):
        if g['gender'] == 'B':
            gender[i]['gender'] = 'Boys'
        elif g['gender'] == 'G':
            gender[i]['gender'] = 'Girls'
        elif g['gender'] == 'M':
            gender[i]['gender'] = 'Mixed'
    # Count of groups per (gender, debut year).
    genderYear = Group.objects.all().values('gender').annotate(
        year=ExtractYear('debut_date'), gcount=Count('gender'))\
        .values('gender', 'year', 'gcount')
    genderY = []
    minYear = Group.objects.all().annotate(year=ExtractYear(
        'debut_date')).values('year').order_by('year')[0]['year']
    maxYear = Group.objects.all().annotate(year=ExtractYear(
        'debut_date')).values('year').order_by('year').reverse()[0]['year']
    # genderY rows: [year, cumulative boys, cumulative girls, cumulative mixed].
    for i in range(minYear, maxYear + 1):
        genderY.append([i, 0, 0, 0])
    # Each debut adds to its own year and every later year (running totals).
    for j in genderYear:
        if j['gender'] == 'B':
            for e in genderY:
                if e[0] >= j['year']:
                    e[1] += j['gcount']
        if j['gender'] == 'G':
            for e in genderY:
                if e[0] >= j['year']:
                    e[2] += j['gcount']
        if j['gender'] == 'M':
            for e in genderY:
                if e[0] >= j['year']:
                    e[3] += j['gcount']
    '''
    gender: Group model data by gender
    genderYear: gender data by year
    minYear: earliest year by gender
    maxYear: most recent year by gender
    genderY: more data by year
    '''
    return render(
        request, 'stats/genderData.html', {
            "gender": gender,
            "genderYear": genderYear,
            "minYear": minYear,
            "maxYear": maxYear,
            "genderY": genderY
        })
def grouped(request):
    """Render brief statistics grouped by sector, category, developer,
    location and year.

    Cleanups: removed the unused ``metrics`` dict, the never-evaluated
    ``yearTrend2`` queryset, a bare no-op queryset expression, the no-op
    ``dir(yearTrend)`` call, and the accumulated commented-out experiments.
    """
    # NOTE: order_by('frequency'/'yearx') names annotations added later in
    # the chain; Django resolves ordering lazily when the SQL is compiled.
    bySector = Briefs.objects.order_by('frequency').values('sector2').annotate(
        frequency=Count('brief_id'))
    byCategory = Briefs.objects.order_by('-frequency')[:20].values(
        'category').annotate(frequency=Count('brief_id'))
    byDeveloper = Briefs.objects.order_by('-frequency')[:20].values(
        'developer', 'sector').annotate(frequency=Count('brief_id'))
    # Briefs received per year, ordered chronologically.
    yearTrend = Briefs.objects.order_by('yearx').annotate(
        yearx=ExtractYear('received')).values('yearx').annotate(
        frequency=Count('brief_id'))
    byLocation = Briefs.objects.order_by('-frequency')[:20].values(
        'locationx').annotate(frequency=Count('brief_id'))
    context = {
        "sector": bySector,
        "category": byCategory,
        "locations": byLocation,
        "byYear": yearTrend,
        "byDeveloper": byDeveloper
    }
    return render(request=request, template_name='pulse/grouped.htm',
                  context=context)
def get(self, request):
    """Show the current user's diary notes: the ones they created and the
    ones they participate in, each annotated with year/month of the date."""
    today = datetime.today().date()

    def with_date_parts(notes):
        # Attach the year and month of each note's date, oldest first.
        return notes.annotate(Year=ExtractYear('date'),
                              month=ExtractMonth('date')).order_by('date')

    created_notes = with_date_parts(
        DiaryNote.objects.filter(user=request.user))
    notes_as_participant = with_date_parts(
        DiaryNote.objects.filter(participants=request.user))

    context = {
        'created_notes': created_notes,
        'notes_as_participant': notes_as_participant,
        'today': today,
    }
    return render(request, 'notes_current_user.html', context)
def calculate_average(self, obj):
    """Annotate rows with average_speed = distance run / elapsed days.

    NOTE(review): elapsed days are approximated with 31 days per month and
    365 days per year, and average_speed divides by calculate_days — confirm
    callers never hit a zero-day difference (division by zero).
    """
    return obj.annotate(
        now_days=ExtractDay(Now()),
        now_month=ExtractMonth(Now()),
        now_year=ExtractYear(Now()),
        days=ExtractDay('user__date'),
        month=ExtractMonth('user__date'),
        year=ExtractYear('user__date'),
    ).annotate(
        # Approximate day count between 'user__date' and now.
        calculate_days=F('now_days') - F('days') +
        31 * (F('now_month') - F('month')) +
        365 * (F('now_year') - F('year')),
        run_passed=ExpressionWrapper(
            F('user__cards__run') - F('user__cards__initial_run'),
            output_field=FloatField())
    ).annotate(average_speed=ExpressionWrapper(
        F('run_passed') / F('calculate_days'),
        output_field=FloatField()))
def user_timeline(request, pk):
    """Render a user's post timeline grouped by (month, year), newest first.

    Fix: the original ordered the groups by '-month' only, so months from
    different years interleaved (e.g. Dec 2019 sorted before Jan 2020's
    earlier months); ordering is now by '-year' then '-month'.
    """
    month_name = {
        1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
        7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'
    }
    user = ExUser.objects.get(pk=pk)
    user_posts = Post.objects.filter(user__id=user.id)
    # Distinct (month, year) pairs the user posted in, newest first.
    month_year = user_posts.annotate(
        month=ExtractMonth('timestamp'),
        year=ExtractYear('timestamp')).values(
        'month', 'year').distinct().order_by('-year', '-month')
    total_posts = user_posts.count()
    a = []
    for f in month_year:
        # Each timeline entry: ("Mon YYYY", queryset of that month's posts).
        a.append((month_name[f['month']] + " " + str(f['year']),
                  user_posts.filter(timestamp__month=f['month'],
                                    timestamp__year=f['year'])))
    return render(request, 'exploreuser/timeline.html', {
        'l': a,
        'u': user,
        'total_posts': total_posts
    })
def getIncrementedData():
    """Return per-day increments for confirmed/cured/death case totals.

    The stored sums are cumulative; each day's increment is the difference
    from the previous day's sum.
    NOTE(review): the starting baselines (25435, 5000, 800) are hard-coded
    and the ordering is by month only, so data spanning multiple years would
    interleave — confirm the dataset covers a single year.
    """
    dataFromDM = dailyData.objects.values(
        day=ExtractDay('when'),
        month=ExtractMonth('when'),
        year=ExtractYear('when')
    ).annotate(Sum('confirmedCases'), Sum('curedCases'), Sum('deathCases'))
    dataFromDM = dataFromDM.order_by('month')
    print(dataFromDM)
    print(len(dataFromDM))
    # dfd is presumably collections.defaultdict — confirm the module import.
    incrementedConfirmedCases, incrementedCuredCases, incrementedDeathCases = dfd(int), dfd(int), dfd(int)
    # Baselines for the day before the first stored record.
    temp1, temp2, temp3 = 25435, 5000, 800
    for i in dataFromDM:
        d = '{}/{}/{}'.format(i['day'], i['month'], i['year'])
        incrementedConfirmedCases[d] = (i['confirmedCases__sum'] - temp1)
        incrementedCuredCases[d] = (i['curedCases__sum'] - temp2)
        incrementedDeathCases[d] = (i['deathCases__sum'] - temp3)
        # Carry today's cumulative totals forward as tomorrow's baseline.
        temp1 = i['confirmedCases__sum']
        temp2 = i['curedCases__sum']
        temp3 = i['deathCases__sum']
    print(incrementedConfirmedCases)
    print(incrementedCuredCases)
    print(incrementedDeathCases)
    # Split each mapping into parallel date/value lists for the caller.
    dateOfCnfInc, dataOfCnfInc = list(incrementedConfirmedCases.keys()), list(incrementedConfirmedCases.values())
    dateOfCurInc, dataOfCurInc = list(incrementedCuredCases.keys()), list(incrementedCuredCases.values())
    dateOfDthInc, dataOfDthInc = list(incrementedDeathCases.keys()), list(incrementedDeathCases.values())
    return dateOfCnfInc, dataOfCnfInc, dateOfCurInc, dataOfCurInc, dateOfDthInc, dataOfDthInc
def get_monthly_revenue_data(from_date, to_date):
    """Build chart labels and per-centre datasets of monthly paid amounts.

    Returns:
        (labels, datasets): "YYYYMM"-style labels from get_list_year_month,
        and one dataset per centre with a paid-amount value per label.

    Fixes: the original rescanned the whole queryset for every
    (centre, label) pair — sums are now indexed once in a dict; also drops
    the redundant ``list(list(...))``. NOTE(review): the original (Vietnamese)
    comment said "take at most 5 centres" but the slice has always been
    ``[:3]`` — confirm which is intended.
    """
    qs = Payment.objects.all().filter(completed_pay_date__gte=from_date,
                                      completed_pay_date__lte=to_date)
    qs = qs.annotate(
        centre_id=F('student_debt__student__user__centre__id'),
        centre_name=F('student_debt__student__user__centre__name'),
        year=ExtractYear('completed_pay_date'),
        month=ExtractMonth('completed_pay_date'))
    qs = qs.values('centre_id', 'year', 'month')
    qs = qs.annotate(sum_paid_amount=Sum(F('paid_amount')))
    qs = qs.values_list('centre_id', 'year', 'month', 'sum_paid_amount')

    # Take the first three centres only.
    centres = Centre.objects.all().order_by('id')[:3].values('id', 'name')
    dates = get_list_year_month(from_date, to_date)
    labels = list(dates)

    # Index sums by "<centre_id>_<yyyymm>" once; O(1) lookup below.
    sums = {
        str(centre_id) + '_' + str(year) + str(month).zfill(2): amount
        for centre_id, year, month, amount in qs
    }

    datasets = []
    for i, centre in enumerate(centres):
        data = [sums.get(str(centre['id']) + '_' + label, 0)
                for label in labels]
        datasets.append({
            'label': centre['name'],
            'backgroundColor': CENTRE_COLORS[i],
            'data': data
        })
    return labels, datasets
def test_annotation_subquery_and_aggregate_values_chaining(self):
    """A Subquery annotation may reference a grouped value via OuterRef and
    be chained with an aggregate inside the same values() grouping."""
    qs = Book.objects.annotate(
        pub_year=ExtractYear('pubdate')).values('pub_year').annotate(
        # Highest rating among books published in the same year.
        top_rating=Subquery(
            Book.objects.filter(pubdate__year=OuterRef(
                'pub_year')).order_by('-rating').values('rating')[:1]),
        total_pages=Sum('pages'),
    ).values('pub_year', 'total_pages', 'top_rating')
    self.assertCountEqual(qs, [
        {
            'pub_year': 1991,
            'top_rating': 5.0,
            'total_pages': 946
        },
        {
            'pub_year': 1995,
            'top_rating': 4.0,
            'total_pages': 1132
        },
        {
            'pub_year': 2007,
            'top_rating': 4.5,
            'total_pages': 447
        },
        {
            'pub_year': 2008,
            'top_rating': 4.0,
            'total_pages': 1178
        },
    ])
def show_archives(context):
    """Return the per-(year, month) post counts for the archive listing."""
    # Count the number of posts published per month.
    # Approach 1:
    # date_list = Blog.objects.dates('created_time', 'month', order='DESC')
    # res = []
    # for date in date_list:
    #     Blog_count = Blog.objects.filter(created_time__year=date.year, created_time__month=date.month).count()
    #     res.append((date, Blog_count))
    # return {
    #     'date_list': date_list
    # }
    # Approach 2:
    return {
        # Group by year and month
        'date_list': Blog.objects.annotate(year=ExtractYear('created_time'),
                                           month=ExtractMonth('created_time'))
        # Keep only the grouping keys from the previous step
        .values('year', 'month')
        # Order chronologically
        .order_by('year', 'month')
        # Aggregate rows sharing the same (year, month) into a count
        .annotate(blog_count=Count('pk'))
    }
def computeHoursDaily(self, employee):
    """Return today's timesheet entry for *employee*, or (0, 0) when the
    daily view holds no data.

    NOTE(review): the shape of the returned entry is determined by
    TimestampDisplay.getDailyView — confirm against that helper.
    """
    date = self.getDate()
    # All timestamps for the current year, broken into calendar and clock
    # components, restricted by the content-admin filter.
    querySet = Timesheets.objects.filter(
        time__year=date.year
    ).values(
        "user__id", "user__user__first_name", "user__user__last_name"
    ).annotate(
        year=ExtractYear("time", output_field=IntegerField()),
        month=ExtractMonth("time", output_field=IntegerField()),
        day=ExtractDay("time", output_field=IntegerField())
    ).annotate(
        seconds=ExtractSecond("time", output_field=IntegerField()),
        minutes=ExtractMinute("time", output_field=IntegerField()),
        hours=ExtractHour("time", output_field=IntegerField())
    ).filter(getFilterIfContentAdmin(self.request)).order_by("time", "user__id")
    data = TimestampDisplay(TimeStampsManager(querySet, date.year)).getDailyView(
        date.day, date.month, employee.id)
    if data == {}:
        return (0, 0)
    else:
        return data[employee.id]
def _get_data_count_by(self, count_by_col, qset, zdataset, new_col,
                       filter_by_col=None, filter_keyword=None):
    """Count rows of *qset* grouped by *count_by_col* and merge the counts
    into a copy of the zero-filled *zdataset*.

    Date columns (per self.date_columns) are grouped by their year.
    Returns a list of tuples, one per zdataset entry, with self.y holding
    the count (0 where no rows matched).

    Fixes: the original merged with a nested O(n*m) scan and left debug
    prints in library code; counts are now indexed once in a dict and the
    prints and a redundant .values(new_col) call are removed.
    """
    if filter_by_col:
        qset = qset.filter(**{filter_by_col + '__iexact': filter_keyword})
    if count_by_col in self.date_columns:
        # Group date columns by their year component.
        qset = qset.annotate(**{
            new_col: ExtractYear(count_by_col)
        }).values(new_col)
    data_count = qset.values(new_col).annotate(**{
        self.y: Count(new_col)
    }).values(new_col, self.y).order_by(new_col)
    # One-pass index of counts keyed on the grouped value.
    counts = {cy[new_col]: cy[self.y] for cy in data_count}
    # Never mutate the caller's template dataset.
    zdataset = deepcopy(zdataset)
    for d in zdataset:
        if d[new_col] in counts:
            d[self.y] = counts[d[new_col]]
    data_count = [tuple(d.values()) for d in zdataset]
    return data_count or []
def test_multiple_partitioning(self):
    """
    Find the maximum salary for each department for people hired in the
    same year.
    """
    # Two partition keys: department AND hire year — the max applies only
    # within each (department, year) pair.
    qs = Employee.objects.annotate(max=Window(
        expression=Max('salary'),
        partition_by=[F('department'), ExtractYear(F('hire_date'))],
    )).order_by('department', 'hire_date', 'name')
    self.assertQuerysetEqual(
        qs,
        [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
        ],
        transform=lambda row: (row.name, row.salary, row.department,
                               row.hire_date, row.max))
def changelist_view(self, request, extra_context=None):
    """Extend the admin changelist context with sold-product revenue data:
    the sold queryset, grand totals, and a per-(month, year) summary."""
    response = super(SalesSummaryPanelAdmin, self).changelist_view(
        request,
        extra_context=extra_context,
    )
    try:
        qs = response.context_data['cl'].queryset
    except (AttributeError, KeyError):
        # No changelist context (e.g. a redirect response) — nothing to add.
        return response

    # Sold items, each annotated with its net revenue (price - cost).
    sold_products = qs.filter(status="S").annotate(
        net_rev=F('price') - F('cost'))

    # Net revenue grouped by sale month/year; order_by() clears default
    # ordering so the grouping is clean.
    month_summary = (sold_products
                     .annotate(sold_month=ExtractMonth('sold_date'))
                     .annotate(sold_year=ExtractYear('sold_date'))
                     .values('sold_month', 'sold_year')
                     .annotate(total_revenue=Sum(F('price') - F('cost')))
                     .order_by())

    # Grand totals across all sold products.
    totals = sold_products.aggregate(
        total_revenue=Sum(F('price') - F('cost')),
        total_cost=Sum('cost'),
        total_price=Sum('price'))

    response.context_data['sold_products'] = sold_products
    response.context_data['totals'] = totals
    response.context_data['month_summary'] = month_summary
    return response
def test_annotation_subquery_and_aggregate_values_chaining(self):
    """A Subquery annotation may reference the grouped value via OuterRef
    while chained with an aggregate inside the same values() grouping."""
    qs = (Book.objects.annotate(
        pub_year=ExtractYear("pubdate")).values("pub_year").annotate(
        # Highest rating among books published in the same year.
        top_rating=Subquery(
            Book.objects.filter(pubdate__year=OuterRef(
                "pub_year")).order_by("-rating").values("rating")[:1]),
        total_pages=Sum("pages"),
    ).values("pub_year", "total_pages", "top_rating"))
    self.assertCountEqual(
        qs,
        [
            {
                "pub_year": 1991,
                "top_rating": 5.0,
                "total_pages": 946
            },
            {
                "pub_year": 1995,
                "top_rating": 4.0,
                "total_pages": 1132
            },
            {
                "pub_year": 2007,
                "top_rating": 4.5,
                "total_pages": 447
            },
            {
                "pub_year": 2008,
                "top_rating": 4.0,
                "total_pages": 1178
            },
        ],
    )
def get_context_data(self, **kwargs):
    """Add the current user's earning sums (per month, week, category and
    year) to the template context.

    NOTE(review): start_date, end_date, month_start, month_end,
    initial_year and final_year are not defined in this method — presumably
    module-level values; confirm they exist and are kept current.
    """
    context = super().get_context_data(**kwargs)
    profile = Profile.objects.filter(user=self.request.user)
    query = Earns.objects.filter(author__in=profile)
    # Monthly totals within [start_date, end_date].
    earnings_per_month = query.filter(
        date__range=[start_date, end_date]).annotate(
        month=ExtractMonth('date')).values('month').annotate(
        sum=Sum('quantity')).order_by()
    # Weekly totals within [month_start, month_end].
    earnings_per_week = query.filter(
        date__range=[month_start, month_end]).annotate(
        week=ExtractWeek('date')).values('week').annotate(
        sum=Sum('quantity')).order_by('week')
    # Totals per category, across all time.
    earnings_per_category = query.values('category__name').annotate(
        sum=Sum('quantity')).order_by('category')
    # Yearly totals within [initial_year, final_year].
    earnings_last_five_years = query.filter(
        date__range=[initial_year, final_year]).annotate(
        year=ExtractYear('date')).values('year').annotate(
        sum=Sum('quantity')).order_by('year')
    context["earnings_per_month"] = earnings_per_month
    context["earnings_per_week"] = earnings_per_week
    context["earnings_per_category"] = earnings_per_category
    context["earnings_last_five_years"] = earnings_last_five_years
    return context
def get(self, request, *args, **kwargs):
    """Get the gaming journal of a user.

    It's a timeline of finished games, grouped by year and month, sorted
    from newest to oldest.

    Args:
        username: username requested
        limit: number of entries to return
    """
    limit = int(request.GET.get('limit', 0))
    # Non-positive limit means "no limit" (a None slice bound).
    limit = limit if limit > 0 else None
    user = CustomUser.objects.get(username=request.GET['username'])
    entries = Journal.objects.filter(user=user).annotate(
        month=ExtractMonth('date'),
        year=ExtractYear('date')).order_by('-date')[:limit]
    # http://ls.pwd.io/2013/05/create-groups-from-lists-with-itertools-groupby/
    # Entries are already sorted by date, so consecutive runs share a year
    # (and, within a year, a month) — exactly what groupby needs.
    response = []
    for year_value, in_year in itertools.groupby(entries,
                                                 key=lambda e: e.year):
        months = []
        for month_value, in_month in itertools.groupby(list(in_year),
                                                       key=lambda e: e.month):
            serialized = JournalSerializer(list(in_month), many=True)
            months.append({'month': month_value, 'entries': serialized.data})
        response.append({'year': year_value, 'months': months})
    return Response(response)
def get_context_data(self, **kwargs):
    """Provide, per publication year, the article counts broken down by
    article type, for the publication chart AJAX endpoint."""
    context = super(AjaxPublicationChartData, self).get_context_data(**kwargs)
    publication = get_object_or_404(Publication, slug=self.kwargs['slug'])
    # Count articles per (issue year, article type); untyped articles are
    # excluded and default ordering is cleared for clean grouping.
    articles = (Article.objects
                .filter(issue__publication=publication)
                .exclude(article_type=None)
                .annotate(year=ExtractYear('issue__issue_date'))
                .annotate(atype=F('article_type__title'))
                .values('year', 'atype')
                .order_by()
                .annotate(count=Count('atype')))
    # groupby needs its input sorted on the grouping key; sort by
    # (year, type) so each year's rows are contiguous and type-ordered.
    rows = sorted(articles, key=lambda x: (x['year'], x['atype']))
    data = OrderedDict()
    for year, year_rows in itertools.groupby(rows, key=lambda x: x['year']):
        data[year] = list(year_rows)
    context['publication'] = publication
    context['data'] = data
    return context
def get_queryset(self):
    """Base thesis queryset with related objects preloaded and reservation
    availability / publication-year annotations attached."""
    # Imported locally, presumably to avoid a circular import — confirm.
    from apps.thesis.models import Reservation
    return super().get_queryset().select_related(
        'category',
        'supervisor',
        'opponent',
    ).prefetch_related(
        'authors',
        'authors__groups',
        'attachment_thesis',
        'attachment_thesis__type_attachment',
        'review_thesis',
        'review_thesis__user',
        'reservation_thesis',
    ).annotate(
        # True when the thesis has no reservation in an open state.
        available_for_reservation=~Exists(
            queryset=Reservation.objects.filter(
                thesis=OuterRef('pk'),
                state__in=Reservation.OPEN_RESERVATION_STATES,
            )
        ),
        open_reservations_count=Count(
            'reservation_thesis',
            filter=Q(reservation_thesis__state__in=Reservation.OPEN_RESERVATION_STATES),
        ),
        # Publication year cast to text.
        published_at_year=Cast(ExtractYear('published_at'), CharField())
    )
def build_object(self, obj):
    """Build the blog's auxiliary listing pages: per-tag, per-month,
    per-year, and the all-posts view."""
    # Build tag view
    tag_counts = ((tag, tag.post_count)
                  for tag in BlogPostTag.tag_model().objects.filter(
                      blogpost__in=obj.public_posts()).annotate(
                          post_count=Count('blogpost')))
    self.paginate_subpage(obj, tag_counts, 'posts_by_tag',
                          lambda tag: {'tag': tag.slug})
    # Build date view
    # It's possible to get a post count by publication year/month via the ORM, but this
    # is much more straightforward. For the record, I did get the ORM version working.
    pub_dates = obj.public_posts().values_list(ExtractYear('pub_date'),
                                               ExtractMonth('pub_date'))
    # Counter tallies occurrences: per (year, month) pair and per year.
    month_counter = Counter(pub_dates)
    year_counter = Counter(ym[0] for ym in pub_dates)
    self.paginate_subpage(obj, month_counter.items(), 'posts_by_date',
                          lambda ym: {
                              'year': ym[0],
                              'month': ym[1]
                          })
    self.paginate_subpage(obj, year_counter.items(), 'posts_by_date',
                          lambda year: {'year': year})
    # Build all posts view
    # Single pseudo-entry carrying the total post count; always built even
    # when empty.
    self.paginate_subpage(obj, ((None, sum(year_counter.values())), ),
                          'all_posts',
                          lambda _: {},
                          always_build_first_page=True)
def complainant_gender_aggregation(self):
    """Aggregate this officer's allegations by complainant gender and year,
    reporting total and sustained counts per bucket."""
    query = self.officerallegation_set.all()
    query = query.values('allegation__complainant__gender').annotate(
        # Blank or missing complainant gender is normalized to 'Unknown'.
        complainant_gender=models.Case(
            models.When(allegation__complainant__gender='',
                        then=models.Value('Unknown')),
            models.When(allegation__complainant__isnull=True,
                        then=models.Value('Unknown')),
            default='allegation__complainant__gender',
            output_field=models.CharField()),
        year=ExtractYear('start_date'))
    query = query.values('complainant_gender', 'year').order_by(
        'complainant_gender', 'year').annotate(
        count=models.Count('complainant_gender'),
        # Sustained allegations carry the final finding code 'SU'.
        sustained_count=models.Sum(
            models.Case(models.When(final_finding='SU', then=1),
                        default=models.Value(0),
                        output_field=models.IntegerField())))
    # Drop empty buckets and map gender codes to display names.
    data = [{
        'name': GENDER_DICT.get(obj['complainant_gender'], 'Unknown'),
        'sustained_count': obj['sustained_count'],
        'count': obj['count'],
        'year': obj['year']
    } for obj in query if obj['count'] > 0]
    return Officer._group_and_sort_aggregations(data)
def get_activity_age(cls, org, time_filter):
    """Build activity series per age segment for *org* over the past year.

    Returns a list of dicts: {'name': segment label, 'data': series}.

    Fix: the original if/elif chain left ``data_key`` unbound (NameError on
    first use) for any AgeSegment whose min_age was not one of the six
    hard-coded values; unknown segments now fall back to a generated
    "min-max" label instead of crashing.
    """
    now = timezone.now()
    today = now.date()
    year_ago = now - timedelta(days=365)
    # Window starts at the first day of the month one year back.
    start = year_ago.replace(day=1).date()

    # Display label for each known segment lower bound.
    segment_labels = {
        0: "0-14",
        15: "15-19",
        20: "20-24",
        25: "25-30",
        31: "31-34",
        35: "35+",
    }

    ages = AgeSegment.objects.all().values("id", "min_age", "max_age")
    output_data = []
    for age in ages:
        data_key = segment_labels.get(age["min_age"])
        if data_key is None:
            # Unrecognized segment: derive a label rather than crash.
            data_key = "%s-%s" % (age["min_age"], age["max_age"])
        # Contacts active in the window whose computed age (activity year
        # minus birth year) falls inside the segment.
        activities = (ContactActivity.objects.filter(
            org=org, date__lte=today, date__gte=start).exclude(born=None).exclude(
            date=None).annotate(year=ExtractYear("date")).annotate(
            age=ExpressionWrapper(
                F("year") - F("born"),
                output_field=IntegerField())).filter(
            age__gte=age["min_age"], age__lte=age["max_age"]).values(
            "date").annotate(Count("id")))
        series = ContactActivity.get_activity_data(activities, time_filter)
        output_data.append(dict(name=data_key, data=series))
    return output_data
def get(self, request, category):
    """Build a menu of events filtered by *category* relative to the
    current ISO week: 'current_week', 'future_events', or past otherwise."""
    all_events = Event.objects.annotate(
        year=ExtractYear('start_datetime'),
        start_week=ExtractWeek('start_datetime'))

    today = datetime.datetime.today()
    current_year = today.isocalendar()[0]
    current_week = today.isocalendar()[1]

    if category == 'current_week':
        events = [e for e in all_events
                  if e.year == current_year and e.start_week == current_week]
    elif category == 'future_events':
        # Later week this year, or any week of a later year.
        events = [e for e in all_events
                  if e.year > current_year
                  or (e.year == current_year and e.start_week > current_week)]
    else:
        # Past events: earlier week this year, or any earlier year.
        events = [e for e in all_events
                  if e.year < current_year
                  or (e.year == current_year and e.start_week < current_week)]

    menu_items = [
        MenuItem(description=truncatechars(event.description, 30),
                 method='GET',
                 path=event.get_absolute_url())
        for event in events
    ]
    content = Menu(body=menu_items, header='menu')
    return self.to_response(content)
def test_rank(self):
    """
    Rank the employees based on the year they were hired. Since there are
    multiple employees hired in different years, this will contain gaps.
    """
    # Rank() over hire-year ordering: ties share a rank, and the next
    # distinct year's rank jumps by the tie count (hence the gaps).
    qs = Employee.objects.annotate(rank=Window(
        expression=Rank(),
        order_by=ExtractYear(F('hire_date')).asc(),
    ))
    self.assertQuerysetEqual(qs, [
        ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
        ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
        ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
        ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4),
        ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5),
        ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6),
        ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6),
        ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6),
        ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9),
        ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10),
        ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11),
        ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11),
    ], lambda entry: (entry.name, entry.salary, entry.department,
                      entry.hire_date, entry.rank), ordered=False)
def order_intake_per_month(request, *args, **kwargs):
    """Return JSON with order counts and bike totals per (month, year)."""
    per_month = (Order.objects
                 .annotate(month=ExtractMonth('date_ordered'),
                           year=ExtractYear('date_ordered'))
                 .values('month', 'year')
                 .annotate(count=Count('pk'))
                 .annotate(bikes=Sum('bikes')))

    labels = []
    orders_count = []
    bikes_count = []
    for row in per_month:
        # Chart label "YYYY-M" plus the two aligned series values.
        labels.append(str(row['year']) + "-" + str(row['month']))
        orders_count.append(row['count'])
        bikes_count.append(row['bikes'])

    payload = {
        "labels": labels,
        "orders_count": orders_count,
        "bikes_count": bikes_count,
    }
    return JsonResponse(payload, safe=False)