def test_trunc_timezone_applied_before_truncation(self):
    start_datetime = microsecond_support(datetime(2016, 1, 1, 1, 30, 50, 321))
    end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
    start_datetime = timezone.make_aware(start_datetime, is_dst=False)
    end_datetime = timezone.make_aware(end_datetime, is_dst=False)
    self.create_model(start_datetime, end_datetime)

    melb = pytz.timezone('Australia/Melbourne')
    pacific = pytz.timezone('US/Pacific')
    model = DTModel.objects.annotate(
        melb_year=TruncYear('start_datetime', tzinfo=melb),
        pacific_year=TruncYear('start_datetime', tzinfo=pacific),
    ).order_by('start_datetime').get()

    self.assertEqual(model.start_datetime, start_datetime)
    self.assertEqual(model.melb_year, truncate_to(start_datetime, 'year', melb))
    self.assertEqual(model.pacific_year, truncate_to(start_datetime, 'year', pacific))
    self.assertEqual(model.start_datetime.year, 2016)
    self.assertEqual(model.melb_year.year, 2016)
    self.assertEqual(model.pacific_year.year, 2015)

def test_trunc_year_func(self):
    start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
    end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'year')
    if settings.USE_TZ:
        start_datetime = timezone.make_aware(start_datetime, is_dst=False)
        end_datetime = timezone.make_aware(end_datetime, is_dst=False)
    self.create_model(start_datetime, end_datetime)
    self.create_model(end_datetime, start_datetime)

    self.assertQuerysetEqual(
        DTModel.objects.annotate(extracted=TruncYear('start_datetime')).order_by('start_datetime'),
        [
            (start_datetime, truncate_to(start_datetime, 'year')),
            (end_datetime, truncate_to(end_datetime, 'year')),
        ],
        lambda m: (m.start_datetime, m.extracted)
    )
    self.assertQuerysetEqual(
        DTModel.objects.annotate(extracted=TruncYear('start_date')).order_by('start_datetime'),
        [
            (start_datetime, truncate_to(start_datetime.date(), 'year')),
            (end_datetime, truncate_to(end_datetime.date(), 'year')),
        ],
        lambda m: (m.start_datetime, m.extracted)
    )
    self.assertEqual(DTModel.objects.filter(start_datetime=TruncYear('start_datetime')).count(), 1)

    with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
        list(DTModel.objects.annotate(truncated=TruncYear('start_time')))

    with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
        list(DTModel.objects.annotate(truncated=TruncYear('start_time', output_field=TimeField())))

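# The tests above lean on a `truncate_to` helper that is not shown here. What
# follows is a minimal sketch of what such a helper might look like for the
# 'year' kind only, assuming pytz-style timezones; the real helper in Django's
# test suite also handles 'month', 'day', and the other Trunc kinds.
from datetime import date, datetime


def truncate_to(value, kind, tzinfo=None):
    """Truncate a date/datetime to the start of its year (sketch)."""
    assert kind == 'year', "this sketch only handles 'year'"
    if tzinfo is not None:
        # Convert first, so truncation happens on the local wall-clock time.
        value = value.astimezone(tzinfo)
    if isinstance(value, datetime):
        result = value.replace(month=1, day=1, hour=0, minute=0,
                               second=0, microsecond=0)
        if tzinfo is not None:
            # replace() keeps the old pytz offset; re-localize the naive result.
            result = tzinfo.localize(result.replace(tzinfo=None))
        return result
    return date(value.year, 1, 1)
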
def update_course_daily_stats(self, last_tracker_pk=0, newest_tracker_pk=0):
    if last_tracker_pk == 0:
        CourseDailyStats.objects.all().delete()

    # get different (distinct) courses/dates involved
    course_daily_type_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk,
                pk__lte=newest_tracker_pk) \
        .exclude(course__isnull=True) \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('course', 'day', 'month', 'year', 'type') \
        .annotate(total=Count('type')) \
        .order_by('day')

    total_logs = course_daily_type_logs.count()
    self.stdout.write('%d different courses/dates/types to process.' % total_logs)

    count = 0
    for type_log in course_daily_type_logs:
        course = Course.objects.get(pk=type_log['course'])
        stats, created = CourseDailyStats.objects \
            .get_or_create(course=course,
                           day=type_log['day'],
                           type=type_log['type'])
        stats.total = (0 if last_tracker_pk == 0 else stats.total) \
            + type_log['total']
        stats.save()
        count += 1
        self.stdout.write(str(count))

    # get different (distinct) search logs involved
    search_daily_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk,
                pk__lte=newest_tracker_pk,
                user__is_staff=False,
                type='search') \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('day', 'month', 'year') \
        .annotate(total=Count('id')) \
        .order_by('day')

    self.stdout.write('%d different search/dates to process.' % search_daily_logs.count())
    for search_log in search_daily_logs:
        stats, created = CourseDailyStats.objects \
            .get_or_create(course=None,
                           day=search_log['day'],
                           type='search')
        stats.total = (0 if last_tracker_pk == 0 else stats.total) \
            + search_log['total']
        stats.save()

def museum_view(request):
    def convert_timedelta(td):
        return {
            'year': td.days // 365,
            'day': td.days % 365,
            'hour': td.seconds // 3600,
            'minute': (td.seconds % 3600) // 60,
            'second': td.seconds % 60
        }

    ctx = {}
    ctx['total_problem_count'] = Problem.objects.count()
    ctx['total_submission_count'] = Submission.objects.count()
    ctx['total_user_count'] = User.objects.filter(is_active=True).count()

    # NOTE: this will break if there is no submission at all
    first_submission = Submission.objects.last()
    ctx['first_submission_time'] = first_submission.create_time
    ctx['first_submission_duration'] = convert_timedelta(
        datetime.now() - ctx['first_submission_time'])
    ctx['first_submission_author'] = first_submission.author

    from uptime import uptime
    ctx['uptime'] = convert_timedelta(timedelta(seconds=uptime()))
    ctx['server_time'] = datetime.now()
    ctx['eoj3_create_duration'] = convert_timedelta(
        datetime.now() - datetime(2017, 3, 11, 18, 32))

    ctx['submission_count_1'] = Submission.objects.filter(
        create_time__gt=datetime.now() - timedelta(days=1)).count()
    ctx['submission_count_7'] = Submission.objects.filter(
        create_time__gt=datetime.now() - timedelta(days=7)).count()
    ctx['submission_count_30'] = Submission.objects.filter(
        create_time__gt=datetime.now() - timedelta(days=30)).count()

    ctx['submission_stat'] = Submission.objects.filter(create_time__gt=datetime.today() - timedelta(days=30)) \
        .annotate(date=TruncDate('create_time')).values('date') \
        .annotate(count=Count('id')).values('date', 'count').order_by()

    # Iterating a queryset caches its rows, so these prefix-sum loops can
    # mutate the cached dicts in place to build cumulative yearly totals.
    ctx['user_stat'] = User.objects.filter(is_active=True).annotate(date=TruncYear('date_joined')).values('date') \
        .annotate(count=Count('id')).values('date', 'count').order_by("date")
    for idx, user in enumerate(ctx['user_stat']):
        if idx == 0:
            continue
        user['count'] += ctx['user_stat'][idx - 1]['count']

    ctx['problem_stat'] = Problem.objects.annotate(date=TruncYear('create_time')).values('date') \
        .annotate(count=Count('id')).values('date', 'count').order_by("date")
    for idx, problem in enumerate(ctx['problem_stat']):
        if idx == 0:
            continue
        problem['count'] += ctx['problem_stat'][idx - 1]['count']

    ctx['servers'] = servers = Server.objects.filter(enabled=True)
    for server in servers:
        server.status = ping(server)

    return render(request, 'museum.jinja2', context=ctx)

def order_by_year(request):
    summaries = Order.objects.annotate(
        year=TruncYear('date')).values('year').annotate(
        sum=Sum('total_price')).values('year', 'sum').annotate(
        count=Count('id')).values('year', 'sum', 'count').order_by('-year')
    return render(request, 'order_by_year.html', {'summaries': summaries})

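# For reference, the same "truncate to year, then aggregate" shape in plain
# Python -- a minimal sketch with made-up (date, total_price) tuples and no
# database -- to illustrate what TruncYear + values() + annotate() computes
# in the view above:
from datetime import date
from itertools import groupby

orders = [
    (date(2022, 3, 1), 100),
    (date(2022, 9, 9), 50),
    (date(2023, 1, 2), 75),
]

summaries = []
for year, group in groupby(sorted(orders, reverse=True), key=lambda o: o[0].year):
    rows = list(group)
    summaries.append({'year': year,
                      'sum': sum(r[1] for r in rows),
                      'count': len(rows)})
# -> [{'year': 2023, 'sum': 75, 'count': 1}, {'year': 2022, 'sum': 150, 'count': 2}]
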
def list(self, request):
    # Puts photos per year in list, 0-index = newest year
    photos_per_year = models.Photo.objects.annotate(
        year=TruncYear('date_taken')).values('year').annotate(
        count=Count('pk')).values('count', 'year')
    # TODO same thing for analog and digital photos (use filter on photos_per_year)
    # This should wait until after database merge
    # has to be in this format for graphics
    photo_per_year_list = []
    for year in photos_per_year:
        photo_per_year_list.append(
            [str(year.get('year').year), year.get('count')])
    photo_per_year_list.sort()

    # TODO sort amount of photos in each album
    photos_per_album = models.Photo.objects.values('album__name').annotate(
        Count('album')).order_by('album__name')
    print('----------')
    print(photos_per_album)
    print('----------')

    statistics = Statistics(
        photos=models.Photo.objects.all().count(),
        tags=models.Tag.objects.all().count(),
        scanned=models.Photo.objects.filter(scanned=True).count(),
        albums=models.Photo.objects.all().count(),  # NOTE: counts photos, not albums -- possibly a bug
        splash=models.Photo.objects.filter(splash=True).count(),
        orders=models.Order.objects.all().count(),
        photos_by_year=photo_per_year_list,
        photos_per_album=photos_per_album)
    serializer = serializers.StatisticsSerializer(statistics)
    return Response(serializer.data)

def requests_by_year():
    result = RequestTimeline.objects.annotate(
        year=TruncYear('request_date')).values('year') \
        .annotate(requests_per_year=Count('id')) \
        .order_by('year')
    return result

def get_tracker_activities(start_date, end_date, user, course_ids=None, course=None):
    # avoid the mutable-default-argument pitfall
    if course_ids is None:
        course_ids = []
    activity = []
    no_days = (end_date - start_date).days + 1

    if course:
        trackers = Tracker.objects.filter(course=course)
    else:
        trackers = Tracker.objects.filter(course__id__in=course_ids)

    trackers = trackers.filter(user=user,
                               tracker_date__gte=start_date,
                               tracker_date__lte=end_date) \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('day') \
        .annotate(count=Count('id'))

    for i in range(no_days):
        temp = start_date + datetime.timedelta(days=i)
        temp_date = temp.date().strftime(STR_DATE_FORMAT)
        count = next((dct['count'] for dct in trackers
                      if dct['day'].strftime(STR_DATE_FORMAT) == temp_date), 0)
        activity.append([temp_date, count])
    return activity

def teacher_home_view(request):
    cohorts = get_cohorts(request)
    start_date = timezone.now() - datetime.timedelta(days=31)
    end_date = timezone.now()

    # get student activity
    activity = []
    no_days = (end_date - start_date).days + 1
    students = User.objects \
        .filter(participant__role=Participant.STUDENT,
                participant__cohort__in=cohorts).distinct()
    courses = Course.objects \
        .filter(coursecohort__cohort__in=cohorts).distinct()

    trackers = Tracker.objects.filter(course__in=courses,
                                      user__in=students,
                                      tracker_date__gte=start_date,
                                      tracker_date__lte=end_date) \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('day') \
        .annotate(count=Count('id'))

    for i in range(no_days):
        temp = start_date + datetime.timedelta(days=i)
        temp_date = temp.date().strftime(STR_DATE_FORMAT)
        count = next((dct['count'] for dct in trackers
                      if dct['day'].strftime(STR_DATE_FORMAT) == temp_date), 0)
        activity.append([temp.strftime(STR_DATE_FORMAT), count])

    return render(request, 'oppia/home-teacher.html',
                  {'cohorts': cohorts,
                   'activity_graph_data': activity})

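# The loop above implements a common "zero-fill" pattern: the GROUP BY only
# returns days that have rows, so the view walks every day in the window and
# fills the gaps with 0. A minimal self-contained sketch of the same pattern
# with made-up counts instead of a queryset (a dict lookup replaces the
# linear scan, but the output shape is identical):
import datetime

STR_DATE_FORMAT = '%Y-%m-%d'  # assumed; the real constant is defined elsewhere

counts_by_day = {'2024-01-01': 5, '2024-01-03': 2}  # only days with activity
start = datetime.date(2024, 1, 1)
activity = []
for i in range(4):
    day = (start + datetime.timedelta(days=i)).strftime(STR_DATE_FORMAT)
    activity.append([day, counts_by_day.get(day, 0)])
# -> [['2024-01-01', 5], ['2024-01-02', 0], ['2024-01-03', 2], ['2024-01-04', 0]]
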
def get_graph_data(self, start_date, end_date):
    daily_downloads = CourseDailyStats.objects \
        .filter(day__gte=start_date,
                day__lte=end_date,
                type='download') \
        .values('day') \
        .annotate(count=Sum('total')) \
        .order_by('day')

    course_downloads = CourseDailyStats.objects \
        .filter(day__gte=start_date,
                day__lte=end_date,
                type='download') \
        .annotate(month=TruncMonth('day'), year=TruncYear('day')) \
        .values('month', 'year') \
        .annotate(count=Sum('total')) \
        .order_by('year', 'month')

    previous_course_downloads = CourseDailyStats.objects \
        .filter(day__lt=start_date, type='download') \
        .aggregate(total=Sum('total')) \
        .get('total', 0)
    if previous_course_downloads is None:
        previous_course_downloads = 0

    return {
        'daily_downloads': daily_downloads,
        'course_downloads': course_downloads,
        'previous_course_downloads': previous_course_downloads
    }

def group_search(request):
    if request.method == 'POST':
        search_details = request.data
        group_by = search_details['group_by']
        order_by = search_details['order_by']
        if not search_details.get('date_group'):
            search_results = Tweets.objects.all().values(*group_by) \
                .annotate(count=Count(group_by[0])).order_by(*order_by)
            response_serializer = GroupSearchSerializer(
                search_results, fields=(*group_by, 'count'), many=True)
            print(response_serializer.data)
            return Response(response_serializer.data, status=status.HTTP_200_OK)
        else:
            bucket_by = TruncDay('created_at')
            if search_details['date_group']['bucket_by'] == 'quarter':
                bucket_by = TruncQuarter('created_at')
            elif search_details['date_group']['bucket_by'] == 'month':
                bucket_by = TruncMonth('created_at')
            elif search_details['date_group']['bucket_by'] == 'year':
                bucket_by = TruncYear('created_at')
            search_results = Tweets.objects.filter(
                created_at__gte=search_details['date_group']['start_date'],
                created_at__lte=search_details['date_group']['end_date']) \
                .annotate(date=bucket_by).values("date", *group_by) \
                .annotate(count=Count('date')).order_by(*order_by)
            response_serializer = GroupSearchSerializer(
                search_results, fields=(*group_by, 'date', 'count'), many=True)
            return Response(response_serializer.data, status=status.HTTP_200_OK)

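# Payload shape the group_search view above appears to expect, inferred from
# its key accesses -- treat the field names and values as illustrative, not a
# documented API:
example_search_details = {
    'group_by': ['user_name'],      # model fields to GROUP BY
    'order_by': ['-count'],         # ordering of the grouped rows
    'date_group': {                 # optional; omit for a plain group-by
        'bucket_by': 'month',       # 'day' (default) | 'quarter' | 'month' | 'year'
        'start_date': '2023-01-01',
        'end_date': '2023-12-31',
    },
}
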
def harvests_by_year():
    result = Harvests.objects.annotate(
        year=TruncYear('harvest_date')).values('year') \
        .annotate(kills_per_year=Count('id')) \
        .order_by('year')
    return result

def radpharmstat(request):
    prod_years = list(
        Production.objects.order_by('-datum').dates('datum', 'year'))
    prod_data = list(
        set(
            Production.objects.order_by(
                'radiopharmaceutical__name', '-datum')
            .annotate(year=TruncYear('datum'))
            .values_list('radiopharmaceutical__name', 'year')))
    start_radiopharmaceutical = Production.objects.filter(
        datum__gte=timezone.now() - relativedelta(years=1)) \
        .order_by('radiopharmaceutical__name') \
        .values_list('radiopharmaceutical__name').distinct()

    prod_years = [obj.year for obj in prod_years]
    prod_years.sort(reverse=True)
    prod_radpharm_by_year = {str(obj): [] for obj in prod_years}
    radiopharmaceutical = []
    for obj in prod_data:
        prod_radpharm_by_year[str(obj[1].year)].append(obj[0])
        radiopharmaceutical.append(obj[0])
    prod_radpharm_by_year['0'] = [obj[0] for obj in start_radiopharmaceutical]

    context = dict(years=prod_years,
                   year_radpharm=prod_radpharm_by_year,
                   radiopharmaceuticals=list(set(radiopharmaceutical)),
                   radpharm=prod_radpharm_by_year['0'])
    return render(request=request,
                  template_name='radpharmprod/view_statistics.html',
                  context=context)

def groundhogs_by_year():
    result = RemovalsByLocation.objects.annotate(
        year=TruncYear('removal_date')).values('year') \
        .annotate(kills_per_year=Count('id')) \
        .order_by('year')
    return result

def get(self, request):
    cases = Case.objects.all() \
        .annotate(year=TruncYear('created')) \
        .values("year") \
        .annotate(cases_created=Count('id')) \
        .order_by("year")

    cases_status = []
    for state in CaseWorkflow.states:
        count = Case.objects.filter(status=state).count()
        cases_status.append({"status": state.name, "count": count})

    cases_category = Case.objects.all() \
        .values("categories") \
        .annotate(count=Count('id'))

    statistics = {
        "cases": {
            "total": Case.objects.count(),
            "yearly": YearlyCasesSerializer(cases, many=True).data,
            "by_status": cases_status,
            "by_category": cases_category,
        },
        "organisations": "to be implemented",
    }
    return Response(statistics)

def get_edemocracia_registers_yearly(start_date=None):
    batch_size = 100
    end_date = date.today().replace(day=1, month=1) - timedelta(days=1)
    if not start_date:
        start_date = end_date.replace(day=1, month=1).strftime('%Y-%m-%d')

    registers_monthly = EdemocraciaAnalysis.objects.filter(
        period='monthly',
        start_date__gte=start_date,
        end_date__lte=end_date.strftime('%Y-%m-%d'))

    # Pull the integer register_count out of the JSONB `data` column.
    registers_count = Cast(
        Func(F('data'), Value('register_count'),
             function='jsonb_extract_path_text'),
        IntegerField())

    data_by_year = registers_monthly.annotate(
        year=TruncYear('start_date')).values('year').annotate(
        total_registers=Sum(registers_count)).values(
        'year', 'total_registers')

    registers_yearly = [
        save_registers_count(result, 'yearly') for result in data_by_year
    ]
    EdemocraciaAnalysis.objects.bulk_create(registers_yearly,
                                            batch_size,
                                            ignore_conflicts=True)

def blogs(request):
    req_cat = request.GET.get('category')
    if req_cat:
        qs = Blogs.objects.filter(category=req_cat)
    else:
        qs = Blogs.objects.all()

    if request.GET.get('arch'):
        req_arch = request.GET.get('arch').split(' ')[0]
        qs = qs.filter(publish__year=req_arch)

    search_blog = request.GET.get('search_blog')
    if search_blog:
        qs = qs.filter(
            Q(title__icontains=search_blog) |
            Q(text__icontains=search_blog)
        )

    p = Paginator(qs, 6)
    page = request.GET.get('page')
    objects = p.get_page(page)

    resents = qs.order_by('-publish')[:4]
    archeves = qs.annotate(year=TruncYear('publish')).values('year').annotate(year_count=Count('id'))
    cats = qs.annotate(cat_count=Count('category'))

    context = {
        'categories': Categories.objects.filter(parent_id=0),
        'objects': objects,
        'cars': show_cars(),
        'categs': cats,
        'resents': resents,
        'archeves': archeves,
    }
    return render(request, 'blog/blogs.html', context)

def get_trackers(start_date, end_date, courses, students=None, date_data='tracker_date'):
    activity = []
    no_days = (end_date - start_date).days + 1

    if date_data == "submitted_date":
        trackers = Tracker.objects.filter(course__in=courses,
                                          submitted_date__gte=start_date,
                                          submitted_date__lte=end_date)
    else:
        trackers = Tracker.objects.filter(course__in=courses,
                                          tracker_date__gte=start_date,
                                          tracker_date__lte=end_date)
    if students:
        # filter() returns a new queryset, so the result must be reassigned
        trackers = trackers.filter(user__in=students)
    # likewise annotate()/values() build a new queryset rather than modifying
    # `trackers` in place
    trackers = trackers.annotate(day=TruncDay(date_data),
                                 month=TruncMonth(date_data),
                                 year=TruncYear(date_data)) \
        .values('day') \
        .annotate(count=Count('id'))

    for i in range(no_days):
        temp = start_date + datetime.timedelta(days=i)
        temp_date = temp.date().strftime(constants.STR_DATE_DISPLAY_FORMAT)
        count = next((dct['count'] for dct in trackers
                      if dct['day'].strftime(constants.STR_DATE_DISPLAY_FORMAT) == temp_date), 0)
        activity.append([temp.strftime(constants.STR_DATE_DISPLAY_FORMAT), count])
    return activity

def get(self, request):
    years_queryset = Event.objects.all().annotate(year=TruncYear('date')) \
        .values('year') \
        .distinct() \
        .order_by('year')
    years_list = [entry['year'].year for entry in years_queryset]
    return Response(years_list)

def get(self, request, *args, **kwargs):
    query = Expense.objects \
        .annotate(month=TruncMonth('expense_date')) \
        .values('month') \
        .distinct()
    months = {q['month'].month for q in query}

    query = Expense.objects \
        .annotate(year=TruncYear('expense_date')) \
        .values('year') \
        .distinct()
    years = {q['year'].year for q in query}

    print(months, years)
    # avoid shadowing the built-in `dict`
    result = {'available_months': months, 'available_years': years}
    return Response(result)

def process(self, request, form, start_date, end_date):
    course_downloads = CourseDailyStats.objects \
        .filter(day__gte=start_date,
                day__lte=end_date,
                type='download') \
        .annotate(month=TruncMonth('day'), year=TruncYear('day')) \
        .values('month', 'year') \
        .annotate(count=Sum('total')) \
        .order_by('year', 'month')

    previous_course_downloads = CourseDailyStats.objects \
        .filter(day__lt=start_date, type='download') \
        .aggregate(total=Sum('total')) \
        .get('total', 0)
    if previous_course_downloads is None:
        previous_course_downloads = 0

    return render(
        request, 'reports/course_downloads.html',
        {
            'form': form,
            'course_downloads': course_downloads,
            'previous_course_downloads': previous_course_downloads
        })

def filter_queryset(self, request, queryset, view):
    ordering = request.query_params.get('ordering', '')
    if ordering.lstrip('-') != 'reference_number':
        return queryset

    asc_desc = "-" if ordering.startswith("-") else ""
    ordering_params = ["{}{}".format(asc_desc, param) for param in ["created_year", "id"]]
    return queryset.annotate(created_year=TruncYear("created")).order_by(*ordering_params)

def student_financialaid_view(request, nric_no):
    student = get_object_or_404(Student, nric_no=nric_no)
    # Group disbursements by the year of disbursed_date: annotate the
    # truncated value and pass that annotation to values(), so the GROUP BY
    # runs on the year rather than on the raw date.
    summaryGrants = Grant.objects.filter(student=student) \
        .annotate(year=TruncYear('disbursed_date')) \
        .values('year') \
        .annotate(sum=Sum('amount')).order_by()
    print(summaryGrants)
    grants = student.grants.all()
    context = {'student': student,
               'summaryGrants': summaryGrants,
               'grants': grants}
    return render(request, 'portfolio/student_financialaid.html', context)

def get_graph_data(self, start_date, end_date):
    daily_activity = CourseDailyStats.objects \
        .filter(day__gte=start_date, day__lte=end_date) \
        .values('day') \
        .annotate(count=Sum('total')) \
        .order_by('day')

    course_activity = CourseDailyStats.objects \
        .filter(day__gte=start_date, day__lte=end_date) \
        .annotate(month=TruncMonth('day'), year=TruncYear('day')) \
        .values('month', 'year') \
        .annotate(count=Sum('total')) \
        .order_by('year', 'month')

    previous_course_activity = CourseDailyStats.objects \
        .filter(day__lt=start_date) \
        .aggregate(total=Sum('total')) \
        .get('total', 0)
    if previous_course_activity is None:
        previous_course_activity = 0

    last_month = timezone.now() - datetime.timedelta(days=131)
    hit_by_course = CourseDailyStats.objects \
        .filter(day__gte=last_month, course__isnull=False) \
        .values('course_id') \
        .annotate(total_hits=Sum('total')) \
        .order_by('-total_hits')
    total_hits = sum(cstats['total_hits'] for cstats in hit_by_course)

    i = 0
    hot_courses = []
    other_course_activity = 0
    for hbc in hit_by_course:
        if i < 10:
            hits_percent = float(hbc['total_hits'] * 100.0 / total_hits)
            course = Course.objects.get(id=hbc['course_id'])
            hot_courses.append({
                'course': course,
                'hits_percent': hits_percent
            })
        else:
            other_course_activity += hbc['total_hits']
        i += 1
    if i > 10:
        hits_percent = float(other_course_activity * 100.0 / total_hits)
        hot_courses.append({
            'course': _('Other'),
            'hits_percent': hits_percent
        })

    return {
        'daily_activity': daily_activity,
        'course_activity': course_activity,
        'previous_course_activity': previous_course_activity,
        'hot_courses': hot_courses
    }

def handle_download_year(observatories, start_date, end_date):
    query = PrecipitationMeasurement.objects.filter(
        observatory__in=observatories,
        measure_datetime__gte=start_date,
        measure_datetime__lte=end_date) \
        .annotate(measure_year=TruncYear('measure_datetime')) \
        .values('observatory', 'observatory__name', 'measure_year') \
        .annotate(Sum('rainfall_rate')) \
        .order_by('observatory', 'measure_year')
    return render_to_csv_response(query)

def filter_queryset(self, request, queryset, view):
    ordering = request.query_params.get('ordering', '')
    if ordering.lstrip('-') != 'unique_id':
        return queryset

    ordering_params = ['partner__name', 'engagement_type', 'created_year', 'id']
    return queryset.annotate(created_year=TruncYear('created')) \
        .order_by(*map(lambda param: ('' if ordering == 'unique_id' else '-') + param, ordering_params))

def year_breakdown(self, entries=None):
    if entries is None:
        entries = self.entry_set.all()
    # Truncate to year and add to select list
    entries = entries.annotate(year=TruncYear('when')).values('year')
    # Group by year and select the count and amount total of each grouping
    entries = entries.annotate(count=models.Count('id'), total=models.Sum('amount'))
    return entries

def filter_queryset(self, request, queryset, view):
    ordering = request.query_params.get('ordering', '')
    if ordering.lstrip('-') != 'reference_number':
        return queryset

    ordering_params = ['created_year', 'id']
    return queryset.annotate(created_year=TruncYear('created')) \
        .order_by(*map(lambda param: ('' if ordering == 'reference_number' else '-') + param, ordering_params))

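# The filter_queryset variants above share one trick: a leading '-' on the
# ?ordering= value flips every tie-breaker column. A minimal self-contained
# illustration of that prefix mapping (the helper name is ours, not the
# project's):
def build_ordering_params(ordering, key, params):
    """Prefix each param with '-' when the requested ordering is descending."""
    prefix = '' if ordering == key else '-'
    return [prefix + param for param in params]

assert build_ordering_params('reference_number', 'reference_number',
                             ['created_year', 'id']) == ['created_year', 'id']
assert build_ordering_params('-reference_number', 'reference_number',
                             ['created_year', 'id']) == ['-created_year', '-id']
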
def revenue_per_year():
    money = Revenue.objects.annotate(
        year=TruncYear('date')).values('year').annotate(
        mysum=Sum('amount')).order_by('year')
    chart_revenue_per_year = {'labels': [], 'series': []}
    for i in money:
        chart_revenue_per_year['labels'].append(i['year'].strftime('%Y'))
        chart_revenue_per_year['series'].append(i['mysum'])
    return chart_revenue_per_year

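# The labels/series dict built above matches the data shape chart libraries
# such as Chartist accept. A self-contained sketch of serializing that shape
# for a template, with made-up totals:
import json

chart_revenue_per_year = {'labels': ['2022', '2023'], 'series': [150.0, 75.5]}
chart_json = json.dumps(chart_revenue_per_year)
# -> '{"labels": ["2022", "2023"], "series": [150.0, 75.5]}'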