def test_trunc_day_func(self):
    start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
    end_datetime = truncate_to(microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123)), 'day')
    if settings.USE_TZ:
        start_datetime = timezone.make_aware(start_datetime, is_dst=False)
        end_datetime = timezone.make_aware(end_datetime, is_dst=False)
    self.create_model(start_datetime, end_datetime)
    self.create_model(end_datetime, start_datetime)
    self.assertQuerysetEqual(
        DTModel.objects.annotate(extracted=TruncDay('start_datetime')).order_by('start_datetime'),
        [
            (start_datetime, truncate_to(start_datetime, 'day')),
            (end_datetime, truncate_to(end_datetime, 'day')),
        ],
        lambda m: (m.start_datetime, m.extracted)
    )
    self.assertEqual(
        DTModel.objects.filter(start_datetime=TruncDay('start_datetime')).count(), 1)
    with self.assertRaisesMessage(
            ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
        list(DTModel.objects.annotate(truncated=TruncDay('start_time')))
    with self.assertRaisesMessage(
            ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
        list(DTModel.objects.annotate(
            truncated=TruncDay('start_time', output_field=TimeField())))
def chart_data(self, qs_filter=None):
    if not qs_filter:
        # No filter in the request: return daily counts for all records.
        return (Oferta.objects.annotate(date=TruncDay("fecha"))
                .values("date")
                .annotate(y=Count("id"))
                .order_by('-date'))
    qs = Q()
    # Conditions for the search box.
    if "q" in qs_filter.keys():
        search_fields = [
            'cliente__nombre', "asunto", "id", "usuario__first_name",
            "usuario__last_name", "usuario__username", "moneda__codigo",
            "moneda__nombre"
        ]
        condic = {}
        for f in search_fields:
            condic[f'{f}__icontains'] = qs_filter.get("q")
        for key, value in condic.items():
            qs.add(Q(**{key: value}), Q.OR)
    # Filter conditions, skipping the search (q) and ordering (o) fields.
    # Note: use `not in`/`!=` here; `is not` compares object identity and
    # is not reliable for string equality.
    for k, v in qs_filter.dict().items():
        if k not in ("q", "o"):
            qs.add(Q(**{k: v}), Q.AND)
    return (Oferta.objects.filter(qs)
            .annotate(date=TruncDay("fecha"))
            .values("date")
            .annotate(y=Count("id"))
            .order_by("-date"))
def update_daily_active_users(self, last_tracker_pk=0, newest_tracker_pk=0, fromstart=False):
    if fromstart:
        # Wipe the cache table first.
        DailyActiveUsers.objects.all().delete()

    # Process based on the submitted_date.
    trackers = Tracker.objects \
        .filter(pk__gt=last_tracker_pk, pk__lte=newest_tracker_pk) \
        .annotate(day=TruncDay('submitted_date')) \
        .values('day') \
        .distinct()

    # For each distinct day, update the DAU model.
    for tracker in trackers:
        print('Updating DAUs for %s' % tracker['day'])
        total_users = Tracker.objects \
            .annotate(day=TruncDay('submitted_date')) \
            .filter(day=tracker['day']) \
            .aggregate(number_of_users=Count('user', distinct=True))
        dau_obj, created = DailyActiveUsers.objects.update_or_create(
            day=tracker['day'],
            defaults={"total_submitted_date": total_users['number_of_users']})
        user_submitted = Tracker.objects \
            .annotate(day=TruncDay('submitted_date')) \
            .filter(day=tracker['day']) \
            .values_list('user', flat=True) \
            .distinct()
        for us in user_submitted:
            time_spent = Tracker.objects \
                .annotate(day=TruncDay('submitted_date')) \
                .filter(day=tracker['day'], user__pk=us) \
                .aggregate(time=Sum('time_taken'))
            u = User.objects.get(pk=us)
            # Clamp to the max 32-bit signed integer to avoid a
            # number-out-of-range error in the database.
            time_taken = min(time_spent['time'], 2147483647)
            try:
                dau = DailyActiveUser()
                dau.dau = dau_obj
                dau.user = u
                dau.type = DailyActiveUser.SUBMITTED
                dau.time_spent = time_taken
                dau.save()
                print("added %s" % u.username)
            except IntegrityError:
                dau = DailyActiveUser.objects.get(
                    user=u, dau=dau_obj, type=DailyActiveUser.SUBMITTED)
                dau.time_spent = time_taken
                dau.save()
                print("updated %s" % u.username)
def stats_export_csv_view(request):
    if not request.user.is_superuser:
        raise PermissionDenied()
    header = ['date', 'signups', 'issue_count', 'comment_count']
    tz = pytz.timezone('Asia/Seoul')
    response = HttpResponse(content_type="text/csv")
    filename = 'stats_%s.csv' % now().isoformat()[:16].replace(':', '-')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    writer = csv.DictWriter(response, fieldnames=header, restval=0)
    writer.writer.writerow(writer.fieldnames)

    user_stats = User.objects.filter(assignment__isnull=False) \
        .annotate(date=TruncDay('date_joined', tzinfo=tz)) \
        .order_by('date').values('date') \
        .annotate(signups=Count('id', distinct=True))
    issue_stats = Issue.objects \
        .annotate(date=TruncDay('created_date', tzinfo=tz)) \
        .order_by('date').values('date') \
        .annotate(issue_count=Count('id', distinct=True))
    comment_stats = Comment.objects \
        .annotate(date=TruncDay('created_date', tzinfo=tz)) \
        .order_by('date').values('date') \
        .annotate(comment_count=Count('id', distinct=True))

    merged_stats = defaultdict(dict)
    for stat in (user_stats, issue_stats, comment_stats):
        for row in stat:
            row['date'] = row['date'].date()
            merged_stats[row['date']].update(row)
    merged_stats = sorted(merged_stats.values(), key=itemgetter('date'))
    writer.writerows(merged_stats)
    return response
def changelist_view(self, request, extra_context=None):
    # Aggregate new subscribers per day.
    begin_date = datetime.now() - timedelta(days=8)
    if request.user.is_superuser:
        chart_data = (
            ListenerLog.objects
            .filter(updated__gt=begin_date)
            .annotate(date=TruncDay("updated"))
            .values("date")
            .annotate(y=Count("id"))
            .order_by("-date")
        )
    else:
        chart_data = (
            ListenerLog.objects
            .filter(player__mount__station__site=request.user.siteuser.site)
            .annotate(date=TruncDay("updated"))
            .values("date")
            .annotate(y=Count("id"))
            .order_by("-date")
        )
    # Serialize and attach the chart data to the template context,
    # merging into any context the caller supplied rather than discarding it.
    as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)
    extra_context = extra_context or {}
    extra_context["chart_data"] = as_json
    # Call the superclass changelist_view to render the page.
    return super().changelist_view(request, extra_context=extra_context)
def update_course_daily_stats(self, last_tracker_pk=0, newest_tracker_pk=0):
    if last_tracker_pk == 0:
        CourseDailyStats.objects.all().delete()

    # Get the distinct courses/dates/types involved.
    course_daily_type_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk, pk__lte=newest_tracker_pk) \
        .exclude(course__isnull=True) \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('course', 'day', 'month', 'year', 'type') \
        .annotate(total=Count('type')) \
        .order_by('day')

    total_logs = course_daily_type_logs.count()
    self.stdout.write('%d different courses/dates/types to process.' % total_logs)
    count = 0
    for type_log in course_daily_type_logs:
        course = Course.objects.get(pk=type_log['course'])
        stats, created = CourseDailyStats.objects \
            .get_or_create(course=course, day=type_log['day'], type=type_log['type'])
        stats.total = (0 if last_tracker_pk == 0 else stats.total) + type_log['total']
        stats.save()
        count += 1
        self.stdout.write(str(count))

    # Get the distinct search logs involved.
    search_daily_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk, pk__lte=newest_tracker_pk,
                user__is_staff=False, type='search') \
        .annotate(day=TruncDay('tracker_date'),
                  month=TruncMonth('tracker_date'),
                  year=TruncYear('tracker_date')) \
        .values('day', 'month', 'year') \
        .annotate(total=Count('id')) \
        .order_by('day')

    self.stdout.write('%d different search/dates to process.' % search_daily_logs.count())
    for search_log in search_daily_logs:
        stats, created = CourseDailyStats.objects \
            .get_or_create(course=None, day=search_log['day'], type='search')
        stats.total = (0 if last_tracker_pk == 0 else stats.total) + search_log['total']
        stats.save()
def handle(self, **options):
    sleep = options['sleep']
    dates = Passage.objects.aggregate(min=TruncDay(Min('passage_at')),
                                      max=TruncDay(Max('passage_at')))
    self.stdout.write(str(dates))
    if not dates['min']:
        self.stdout.write('No data to be processed')
        return
    date_min = dates['min'] + timedelta(days=1)
    date_max = dates['max'] + timedelta(days=2)
    i = 0
    initial_size = initial_size_pretty = None
    for date in (date_min + timedelta(n) for n in range((date_max - date_min).days)):
        i += 1
        partition_name = f'passage_passage_{date:%Y%m%d}'
        vacuum_query = f'VACUUM FULL ANALYZE {partition_name}'
        size_query = ("SELECT pg_database_size("
                      f"'{settings.DATABASES['default']['NAME']}');")
        self.stdout.write(f'Starting vacuum: {vacuum_query}')
        try:
            with connection.cursor() as cursor:
                cursor.execute(size_query)
                size = cursor.fetchone()[0]
                if not initial_size:
                    initial_size = size
                    initial_size_pretty = filesizeformat(size)
                size_pretty = filesizeformat(size)
                change = size - initial_size
                change_pretty = filesizeformat(change)
                percentage_change = change / initial_size * 100
                self.stdout.write(
                    f'Total DB size: {self.style.SUCCESS(size_pretty)} '
                    f'(initial: {initial_size_pretty}, '
                    f'change: {change_pretty}, '
                    f'{percentage_change:.1f}%)')
                cursor.execute(vacuum_query)
        except Exception as e:
            self.stderr.write(f'Error vacuuming: {e}')
        # Take a longer break every tenth partition.
        if i % 10 == 0:
            self.stdout.write(f'sleeping for: {self.style.SUCCESS(sleep)}')
            time.sleep(sleep)
        else:
            self.stdout.write(f'sleeping for: {self.style.SUCCESS(0.1)}')
            time.sleep(0.1)
    self.stdout.write(self.style.SUCCESS('Finished'))
def monitor_user(request, short_name, user_id):
    dataset = get_object_or_404(Dataset, short_name=short_name)
    if not dataset.user_is_maintainer(request.user):
        return HttpResponseRedirect(reverse('dataset', args=[dataset.short_name]))
    user = get_object_or_404(User, id=user_id)

    contribs = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                    .filter(from_expert=False)
                    .annotate(day=TruncDay('created_at'))
                    .order_by("-day")
                    .values('day').annotate(count=Count('id'))
                    .values_list('day', 'count', 'candidate_annotation__taxonomy_node__name'))

    contribs_failed = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                           .filter(test='FA')
                           .annotate(day=TruncDay('created_at'))
                           .order_by("-day")
                           .values('day').annotate(count=Count('id'))
                           .values_list('day', 'count', 'candidate_annotation__taxonomy_node__name'))

    contribs_curation_task = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                                  .filter(from_expert=True)
                                  .annotate(day=TruncDay('created_at'))
                                  .order_by("-day")
                                  .values('day').annotate(count=Count('id'))
                                  .values_list('day', 'count',
                                               'candidate_annotation__taxonomy_node__name', 'vote'))

    # Append an alternating colour marker ('g'/'w') to each row, switching
    # whenever the day changes, so the template can stripe rows per day.
    if contribs:
        contribs[0] += ('g',)
        for idx, contrib in enumerate(contribs):
            if idx > 0:
                if contrib[0] == contribs[idx - 1][0]:
                    contribs[idx] += (contribs[idx - 1][3],)
                else:
                    contribs[idx] += ('g',) if contribs[idx - 1][3] == 'w' else ('w',)

    # Curation rows carry an extra 'vote' field, so their colour marker
    # lives at index 4, not 3.
    if contribs_curation_task:
        contribs_curation_task[0] += ('g',)
        for idx, contrib in enumerate(contribs_curation_task):
            if idx > 0:
                if contrib[0] == contribs_curation_task[idx - 1][0]:
                    contribs_curation_task[idx] += (contribs_curation_task[idx - 1][4],)
                else:
                    contribs_curation_task[idx] += ('g',) if contribs_curation_task[idx - 1][4] == 'w' else ('w',)

    if contribs_failed:
        contribs_failed[0] += ('g',)
        for idx, contrib in enumerate(contribs_failed):
            if idx > 0:
                if contrib[0] == contribs_failed[idx - 1][0]:
                    contribs_failed[idx] += (contribs_failed[idx - 1][3],)
                else:
                    contribs_failed[idx] += ('g',) if contribs_failed[idx - 1][3] == 'w' else ('w',)

    return render(request, 'monitor/monitor_user.html', {
        'dataset': dataset,
        'username': user.username,
        'contribs': contribs,
        'contribs_curation': contribs_curation_task,
        'contribs_failed': contribs_failed,
    })
def update_course_daily_stats(self, last_tracker_pk=0, newest_tracker_pk=0):
    if last_tracker_pk == 0:
        CourseDailyStats.objects.all().delete()

    excluded_users = self.get_excluded_users()

    # Get the distinct courses/dates/types involved.
    course_daily_type_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk, pk__lte=newest_tracker_pk) \
        .exclude(course__isnull=True) \
        .exclude(user__in=excluded_users) \
        .annotate(day=TruncDay('tracker_date')) \
        .values('course', 'day', 'type') \
        .annotate(total=Count('type')) \
        .order_by('day')

    total_logs = course_daily_type_logs.count()
    self.stdout.write('%d different courses/dates/types to process.' % total_logs)
    count = 0
    for type_log in course_daily_type_logs:
        course = Course.objects.get(pk=type_log['course'])
        stats, created = CourseDailyStats.objects.get_or_create(
            course=course, day=type_log['day'], type=type_log['type'])
        stats.total = (0 if last_tracker_pk == 0 else stats.total) + type_log['total']
        stats.save()
        count += 1
        self.stdout.write(str(count))

    # Get the distinct non-course logs (search/login/register) involved.
    noncourse_types = ['search', 'login', 'register']
    noncourse_daily_logs = Tracker.objects \
        .filter(pk__gt=last_tracker_pk, pk__lte=newest_tracker_pk,
                type__in=noncourse_types) \
        .exclude(user__in=excluded_users) \
        .annotate(day=TruncDay('tracker_date')) \
        .values('day', 'type') \
        .annotate(total=Count('type')) \
        .order_by('day')

    self.stdout.write('%d different non-course logs/dates to process.' % noncourse_daily_logs.count())
    for log in noncourse_daily_logs:
        stats, created = CourseDailyStats.objects.get_or_create(
            course=None, day=log['day'], type=log['type'])
        stats.total += log['total']
        stats.save()
def get_chart1_data():
    type_1_labels = []
    type_1_stock = []
    type_1_production = []
    type_1_waste = []
    days = get_days(5)
    for day in days:
        type_1_labels.append(day.strftime('%d/%m'))
        ed = day.replace(hour=23, minute=59, second=59, microsecond=0)
        st = ed.replace(hour=0, minute=0, second=0, microsecond=0)
        # The three series differ only by trxn_type:
        # 0 = stock, 1 = production, 2 = waste.
        for trxn_type, values in ((0, type_1_stock),
                                  (1, type_1_production),
                                  (2, type_1_waste)):
            qs = InventoryTransactions.objects \
                .annotate(day=TruncDay('trxn_timestamp')) \
                .values('day') \
                .filter(trxn_type=trxn_type,
                        trxn_timestamp__lte=ed,
                        trxn_timestamp__gte=st) \
                .annotate(weight=Sum('weight'))
            if qs:
                for trxn in qs:
                    values.append(round(trxn['weight'], 2))
            else:
                values.append(0)
    type_1_data = [type_1_stock, type_1_production, type_1_waste]
    data = {'label': type_1_labels, 'data': type_1_data}
    return data
def dashboard_data(request):
    today = datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)

    orders_by_dates = Order.objects \
        .annotate(day=TruncDay('created_on', tzinfo=timezone.utc)) \
        .values("day").annotate(value=Count('id')) \
        .values('day', 'value')
    orders_by_dates = {x["day"]: x["value"] for x in orders_by_dates}

    sellers_by_dates = Seller.objects \
        .annotate(day=TruncDay('user__date_joined', tzinfo=timezone.utc)) \
        .values("day").annotate(value=Count('user_id')) \
        .values('day', 'value')
    sellers_by_dates = {x["day"]: x["value"] for x in sellers_by_dates}

    # Fill in zero counts for the last 30 days with no activity.
    complete_order_dates = []
    for day in (today - timedelta(n) for n in range(30, 0, -1)):
        complete_order_dates.append({"day": day, "value": orders_by_dates.get(day, 0)})

    complete_seller_dates = []
    for day in (today - timedelta(n) for n in range(30, 0, -1)):
        complete_seller_dates.append({"day": day, "value": sellers_by_dates.get(day, 0)})

    return JsonResponse({
        "total_products": Product.objects.count(),
        "total_companies": Company.objects.count(),
        "total_sellers": Seller.objects.count(),
        "total_orders": Order.objects.count(),
        "total_orders_value": OrderLine.objects.aggregate(Sum('price'))["price__sum"],
        "order_data": complete_order_dates,
        "seller_data": complete_seller_dates,
    })
def exam_signups_counts(request, course_code, exam_id):
    course, _ = course_auth(request, course_code, instructor=True)
    exam = get_object_or_404(Exam, pk=exam_id, course=course)

    # Compute time slots and exam slots.
    time_slots = exam.time_slot_set \
        .annotate(day=TruncDay('start_time')) \
        .prefetch_related('exam_slot_set')
    exam_slots = exam.exam_slot_set \
        .annotate(day=TruncDay('start_time_slot__start_time')) \
        .select_related('start_time_slot') \
        .prefetch_related('time_slots') \
        .prefetch_related('time_slots__exam_slot_set')

    # Compute unregistered users.
    num_course_users = course.course_user_set.count()
    num_registered = exam.exam_registration_set \
        .exclude(exam_slot__isnull=True) \
        .count()
    num_unregistered = num_course_users - num_registered

    # Compute unregistered students.
    num_course_users_students = course.course_user_set \
        .filter(user_type=CourseUser.STUDENT) \
        .count()
    num_registered_students = exam.exam_registration_set \
        .filter(course_user__user_type=CourseUser.STUDENT) \
        .exclude(exam_slot__isnull=True) \
        .count()
    num_unregistered_students = num_course_users_students - num_registered_students

    return render(request, 'registration/exam_signups_counts.html', {
        'course': course,
        'exam': exam,
        'time_slots': time_slots,
        'exam_slots': exam_slots,
        'num_course_users': num_course_users,
        'num_registered': num_registered,
        'num_unregistered': num_unregistered,
        'num_course_users_students': num_course_users_students,
        'num_registered_students': num_registered_students,
        'num_unregistered_students': num_unregistered_students,
    })
def compute_dataset_num_ground_truth_per_day(store_key, dataset_id):
    logger.info('Start computing data for {0}'.format(store_key))
    try:
        dataset = Dataset.objects.get(id=dataset_id)
        num_ground_truth_not_from_propagation = GroundTruthAnnotation.objects \
            .filter(sound_dataset__dataset=dataset) \
            .filter(from_propagation=False) \
            .annotate(day=TruncDay('created_at')) \
            .values('day') \
            .annotate(count=Count('id')) \
            .values('day', 'count')
        num_ground_truth_from_propagation = GroundTruthAnnotation.objects \
            .filter(sound_dataset__dataset=dataset) \
            .filter(from_propagation=True) \
            .annotate(day=TruncDay('created_at')) \
            .values('day') \
            .annotate(count=Count('id')) \
            .values('day', 'count')

        start_date = GroundTruthAnnotation.objects \
            .filter(sound_dataset__dataset=dataset) \
            .order_by('created_at')[0].created_at.replace(tzinfo=None)
        end_date = datetime.datetime.now()
        dates = [str(start_date + datetime.timedelta(days=x))[:10]
                 for x in range(0, (end_date - start_date).days)]

        # Default every day to zero, then overwrite with the actual counts.
        num_ground_truth_not_from_propagation_per_day = {d: 0 for d in dates}
        num_ground_truth_not_from_propagation_per_day.update(
            {str(o['day'])[:10]: o['count'] for o in num_ground_truth_not_from_propagation})
        num_ground_truth_from_propagation_per_day = {d: 0 for d in dates}
        num_ground_truth_from_propagation_per_day.update(
            {str(o['day'])[:10]: o['count'] for o in num_ground_truth_from_propagation})

        store.set(store_key, {
            'num_ground_truth_not_from_propagation_per_day': json.dumps(sorted(
                [[day, count] for day, count in num_ground_truth_not_from_propagation_per_day.items()],
                key=lambda x: x[0])),
            'num_ground_truth_from_propagation_per_day': json.dumps(sorted(
                [[day, count] for day, count in num_ground_truth_from_propagation_per_day.items()],
                key=lambda x: x[0]))
        })
        logger.info('Finished computing data for {0}'.format(store_key))
    except Dataset.DoesNotExist:
        pass
def get_chart3_data():
    type_3_labels = []
    type_3_data = []
    days = get_days(8)
    for day in days:
        type_3_labels.append(day.strftime('%d/%m'))
        ed = day.replace(hour=23, minute=59, second=59, microsecond=0)
        st = ed.replace(hour=0, minute=0, second=0, microsecond=0)
        sales = PackingSlips.objects \
            .annotate(day=TruncDay('create_timestamp')) \
            .values('day') \
            .filter(create_timestamp__lte=ed, create_timestamp__gte=st) \
            .annotate(amount=Sum('total_amount'))
        if sales:
            for trxn in sales:
                try:
                    type_3_data.append(round(trxn['amount']))
                except TypeError:
                    # Sum() returns None when total_amount is NULL for the day.
                    type_3_data.append(0)
        else:
            type_3_data.append(0)
    data = {'label': type_3_labels, 'data': type_3_data}
    return data
def orders_per_day(self, request):
    starting_day = datetime.now() - timedelta(days=180)
    # Notes on this long expression:
    # 1. Use `str(order_data.query)` to get the actual SQL code
    # 2. TruncDay here is simply used to "chunk down" the precision :)
    # 3. The `.values` behaves just like the one for dict (a list of dicts)
    order_data = (models.Order.objects
                  .filter(date_added__gt=starting_day)
                  .annotate(day=TruncDay("date_added"))
                  .values("day")
                  .annotate(c=Count("id")))
    # => labels: [ '2020-02-07' ]
    # => values: 2 (the amount of orders on THAT day)
    labels = [x["day"].strftime("%Y-%m-%d") for x in order_data]
    values = [x["c"] for x in order_data]
    context = dict(
        self.each_context(request),
        title="Orders per day",
        labels=labels,
        values=values,
    )
    return TemplateResponse(request=request,
                            template="orders_per_day.html",
                            context=context)
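# The notes above mention inspecting the SQL behind the annotation. A minimal
# sketch of doing that from a Django shell, assuming an Order model with a
# date_added DateTimeField (the app path and model name here are illustrative,
# not taken from the snippet above):
from django.db.models import Count
from django.db.models.functions import TruncDay

from shop.models import Order  # hypothetical app/model for this sketch

qs = (Order.objects
      .annotate(day=TruncDay("date_added"))
      .values("day")
      .annotate(c=Count("id")))

# QuerySet.query compiles the ORM expression to SQL, which shows how TruncDay
# is rendered by the active database backend (e.g. DATE_TRUNC on PostgreSQL).
print(str(qs.query))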
def time_csv(request):
    request_dict = request.GET.dict()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="hashtags_dates.csv"'
    hashtags = hashtag_queryset(request_dict)
    earliest_date = hashtags[len(hashtags) - 1].timestamp.date()
    latest_date = hashtags.first().timestamp.date()
    time_dic = {}
    qs = hashtags.annotate(day=TruncDay('timestamp')) \
        .values('day') \
        .annotate(edits=Count('rc_id')) \
        .order_by()
    for item in qs:
        time_dic[item['day'].date()] = item['edits']
    writer = csv.writer(response)
    writer.writerow([
        # Translators: Date on which edit is made.
        _('Date'),
        # Translators: Edits done on wikimedia projects.
        _('Edits')
    ])
    # Emit one row per day, filling gaps with zero edits.
    while earliest_date <= latest_date:
        if earliest_date in time_dic:
            temp = time_dic.pop(earliest_date)
            writer.writerow([earliest_date.strftime("%Y-%m-%d"), temp])
        else:
            writer.writerow([earliest_date.strftime("%Y-%m-%d"), 0])
        earliest_date = earliest_date + timedelta(days=1)
    return response
def changelist_view(self, request, extra_context=None):
    # Daily user creation metrics for the chart.
    chart_data = (
        User.objects.annotate(date=TruncDay("date_joined"))
        .values("date")
        .annotate(y=Count("id"))
        .order_by("-date")
    )
    # Monthly user creation metrics.
    month_data = (
        User.objects.annotate(date=TruncMonth("date_joined"))
        .values("date")
        .annotate(y=Count("id"))
        .order_by("-date")
    )
    # Weekly user creation metrics.
    weekly_data = (
        User.objects.annotate(date=TruncWeek("date_joined"))
        .values("date")
        .annotate(y=Count("id"))
        .order_by("-date")
    )
    # Convert to JSON for the chart input.
    as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)
    # Pass all data into the change_list context, merging with any
    # context the caller supplied rather than discarding it.
    extra_context = extra_context or {}
    extra_context.update({
        "chart_data": as_json,
        "weekly_data": weekly_data,
        "monthly_data": month_data,
    })
    return super().changelist_view(request, extra_context=extra_context)
def orders_per_day(self, request):
    starting_day = datetime.now() - timedelta(days=180)
    order_data = (
        models.Order.objects
        .filter(date_added__gt=starting_day)
        .annotate(day=TruncDay("date_added"))
        .values("day")
        .annotate(c=Count("id"))
    )
    labels = [x["day"].strftime("%Y-%m-%d") for x in order_data]
    values = [x["c"] for x in order_data]
    context = dict(
        self.each_context(request),
        title="Orders per day",
        labels=labels,
        values=values,
    )
    return TemplateResponse(request, "orders_per_day.html", context)
def sales_api(request):
    startTime_month = timezone.now() - datetime.timedelta(days=30)
    qs = (Sale.objects.filter(timestamp__gt=startTime_month)
          .annotate(day=TruncDay('timestamp'))
          .values('day')
          .annotate(c=Count('*'))
          .annotate(r=Sum('price')))
    db_sales = {i["day"].date(): (i["c"], money(i["r"])) for i in qs}
    base = timezone.now().date()
    date_list = [base - datetime.timedelta(days=x) for x in range(0, 30)]
    # Fill gaps: days with no sales get zero counts and revenue.
    sales_list = []
    revenue_list = []
    for date in date_list:
        if date in db_sales:
            sales, revenue = db_sales[date]
            sales_list.append(sales)
            revenue_list.append(revenue)
        else:
            sales_list.append(0)
            revenue_list.append(0)
    items = {
        "day": date_list,
        "sales": sales_list,
        "revenue": revenue_list,
    }
    return JsonResponse(items)
def get_query_by_day(base_query, selected_month):
    return base_query \
        .filter(exact_date__gte=selected_month,
                exact_date__lt=selected_month + relativedelta(months=+1)) \
        .annotate(day=TruncDay('message__time')) \
        .values('day', 'message__author', 'message__author__name',
                'message__author__color') \
        .annotate(total=Count('day')) \
        .order_by('day')
def export_purchase_report(request, from_dt, to_dt):
    response = HttpResponse(content_type='application/ms-excel')
    current_url = resolve(request.path_info).url_name
    start_date = datetime.strptime(from_dt, '%Y-%m-%d')
    end_date = datetime.strptime(to_dt, '%Y-%m-%d')
    filename = current_url + str(from_dt) + "to" + str(to_dt) + ".xls"
    response['Content-Disposition'] = 'attachment; filename=' + str(filename)
    wb = xlwt.Workbook(encoding='utf-8')
    ws = wb.add_sheet('Users')
    session_id = request.session['session']
    year_ending_obj = YearEnding.objects.get(id=session_id)
    row_num = 0
    font_style = xlwt.XFStyle()
    font_style.font.bold = True
    if current_url == "PurMonthReport":
        columns = ['Month', 'Total CGST', 'Total SGST', 'Total Discount', 'Amount']
        query = PurchaseInvHrd.objects \
            .filter(id__gte=year_ending_obj.year_sale_id,
                    doc_dt__gte=from_dt, doc_dt__lte=to_dt) \
            .annotate(month=TruncMonth('doc_dt')) \
            .values('month') \
            .annotate(amt=Sum('paid_amount'), total_cgst=Sum('paid_cgst'),
                      total_sgst=Sum('paid_sgst'), total_disc=Sum('paid_discount')) \
            .values('month', 'amt', 'total_cgst', 'total_sgst', 'total_disc') \
            .order_by('month')
        rows = query.values_list('month', 'total_cgst', 'total_sgst', 'total_disc', 'amt')
    elif current_url == "PurDayReport":
        columns = ['Date', 'Total CGST', 'Total SGST', 'Total Discount', 'Amount']
        query = PurchaseInvHrd.objects \
            .filter(id__gte=year_ending_obj.year_sale_id,
                    doc_dt__gte=from_dt, doc_dt__lte=to_dt) \
            .annotate(day=TruncDay('doc_dt')) \
            .values('day') \
            .annotate(amt=Sum('paid_amount'), total_cgst=Sum('paid_cgst'),
                      total_sgst=Sum('paid_sgst'), total_disc=Sum('paid_discount')) \
            .values('day', 'amt', 'total_cgst', 'total_sgst', 'total_disc') \
            .order_by('day')
        rows = query.values_list('day', 'total_cgst', 'total_sgst', 'total_disc', 'amt')
    # Grand totals over the whole period; needed for both report types.
    total = query.aggregate(total_amt=Sum('paid_amount'), total_cgst=Sum('paid_cgst'),
                            total_sgst=Sum('paid_sgst'), total_disc=Sum('paid_discount'))
    for col_num in range(len(columns)):
        ws.write(row_num, col_num, columns[col_num], font_style)
    font_style = xlwt.XFStyle()
    for row in rows:
        row_num += 1
        for col_num in range(len(row)):
            ws.write(row_num, col_num, str(row[col_num]), font_style)
    ws.write(row_num + 1, 0, "Total", font_style)
    ws.write(row_num + 1, 1, total['total_cgst'], font_style)
    ws.write(row_num + 1, 2, total['total_sgst'], font_style)
    ws.write(row_num + 1, 3, total['total_disc'], font_style)
    ws.write(row_num + 1, 4, total['total_amt'], font_style)
    wb.save(response)
    return response
def tracker_dashboard(request):
    entries = Entries.objects.filter(Username=request.user).order_by('-DateTime')
    weight_labels = []
    weight_data = []
    for entry in entries:
        if entry.Tracking == "Weight":
            weight_labels.append(entry.DateTime.strftime("%d %b"))
            weight_data.append(entry.Numerical_Value)
    exercise_count_by_day = Entries.objects \
        .annotate(day=TruncDay('DateTime')) \
        .filter(Username=request.user, Tracking="Exercise") \
        .exclude(Additional_Information="Skipped") \
        .values('day') \
        .annotate(total=Count('id'))
    exercise_labels = []
    exercise_data = []
    for exercise_day in exercise_count_by_day:
        exercise_labels.append(exercise_day["day"].strftime("%d %b"))
        exercise_data.append(exercise_day["total"])
    return render(request, 'tracker/tracker_dashboard.html', {
        'weight_labels': weight_labels,
        'weight_data': weight_data,
        'exercise_labels': exercise_labels,
        'exercise_data': exercise_data
    })
def get_trackers(start_date, end_date, courses, students=None, date_data='tracker_date'):
    activity = []
    no_days = (end_date - start_date).days + 1
    if date_data == "submitted_date":
        trackers = Tracker.objects.filter(course__in=courses,
                                          submitted_date__gte=start_date,
                                          submitted_date__lte=end_date)
    else:
        trackers = Tracker.objects.filter(course__in=courses,
                                          tracker_date__gte=start_date,
                                          tracker_date__lte=end_date)
    # QuerySet methods return new querysets, so the results must be assigned
    # back (the original calls discarded them, leaving trackers unfiltered
    # and unannotated).
    if students:
        trackers = trackers.filter(user__in=students)
    trackers = trackers.annotate(day=TruncDay(date_data),
                                 month=TruncMonth(date_data),
                                 year=TruncYear(date_data)) \
        .values('day') \
        .annotate(count=Count('id'))
    for i in range(no_days):
        temp = start_date + datetime.timedelta(days=i)
        temp_date = temp.date().strftime(constants.STR_DATE_DISPLAY_FORMAT)
        count = next((dct['count'] for dct in trackers
                      if dct['day'].strftime(constants.STR_DATE_DISPLAY_FORMAT) == temp_date), 0)
        activity.append([temp.strftime(constants.STR_DATE_DISPLAY_FORMAT), count])
    return activity
def get_daily_upload_stats(self):
    '''
    Gets the number of experiments uploaded per day for the current week
    '''
    week = self.get_week()
    qs = self.user.company.experiment_set \
        .filter(create_timestamp__gte=week[0], create_timestamp__lt=week[1]) \
        .annotate(day=TruncDay('create_timestamp')) \
        .values('day') \
        .annotate(count=Count('id')) \
        .values('day', 'count')
    data = {i['day'].strftime("%a %b %d"): i['count'] for i in qs}
    # Fill in zero counts for days with no uploads.
    current = week[0]
    while current < week[1]:
        datestring = current.strftime("%a %b %d")
        if datestring not in data:
            data[datestring] = 0
        current += timedelta(days=1)
    self.send_json({'action': 'get_daily_upload_stats', 'data': data})
def _determineTrunc(subject: str, period: str) -> Tuple[Union[TruncHour, TruncDay, TruncWeek, TruncMonth], str]:
    if period == "Hour":
        fields = """
        FLOOR(DATE_PART('day', first_date - %s) * 24
              + DATE_PART('hour', first_date - %s)) AS first_date,
        FLOOR(DATE_PART('day', timestamp - first_date) * 24
              + DATE_PART('hour', timestamp - first_date)) AS date,
        """
        return TruncHour(subject), fields
    elif period == "Day":
        fields = """
        FLOOR(DATE_PART('day', first_date - %s)) AS first_date,
        FLOOR(DATE_PART('day', timestamp - first_date)) AS date,
        """
        return TruncDay(subject), fields
    elif period == "Week":
        fields = """
        FLOOR(DATE_PART('day', first_date - %s) / 7) AS first_date,
        FLOOR(DATE_PART('day', timestamp - first_date) / 7) AS date,
        """
        return TruncWeek(subject), fields
    elif period == "Month":
        fields = """
        FLOOR((DATE_PART('year', first_date) - DATE_PART('year', %s)) * 12
              + DATE_PART('month', first_date) - DATE_PART('month', %s)) AS first_date,
        FLOOR((DATE_PART('year', timestamp) - DATE_PART('year', first_date)) * 12
              + DATE_PART('month', timestamp) - DATE_PART('month', first_date)) AS date,
        """
        return TruncMonth(subject), fields
    else:
        raise ValueError(f"Period {period} is unsupported.")
def _computation(self):
    annotated = self.twitter_users.annotate(day=TruncDay('created_at'))
    grouped_by = annotated.values('day').annotate(dcount=Count('id_int'))
    ordered_by_freq = grouped_by.order_by('-dcount')
    ordered_by_date = grouped_by.order_by('day')
    dated_distribution = UserCreationDateDistribution.objects.create(
        metric=self, frequency='d')
    UserDistributionDate.objects.bulk_create([
        UserDistributionDate(counter=i['dcount'], date=i['day'],
                             distribution=dated_distribution)
        for i in ordered_by_date
    ])
    for e in ordered_by_freq[:self.number_of_communities]:
        relevant = annotated.filter(day__date=e['day'])
        community = Community(metric=self)
        community.campaign = self.campaign
        community.description = 'Users of %s created on %s' % (
            self.campaign.name, e['day'].strftime('%Y-%m-%d'))
        community.name = 'Created on %s' % e['day'].strftime('%Y-%m-%d')
        community.save()
        community.twitter_users.set(relevant)
        text = 'Created on %s as %d others' % (
            e['day'].strftime('%Y-%m-%d'), e['dcount'])
        description = 'Part of the group of %d users created on %s for campaign %s' % (
            e['dcount'], e['day'].strftime('%Y-%m-%d'), self.campaign.name)
        community.add_fact(self, text, description)
        logger.debug('Created community %d with %d users for date %s' %
                     (community.id, e['dcount'], e['day']))
    return True
def get_query_by_day_user(base_query, user):
    return base_query \
        .filter(message__author=user) \
        .annotate(day=TruncDay('message__time')) \
        .values('day') \
        .annotate(total=Count('day')) \
        .order_by('day')
def posco_set_ep_access_day_mean(self):
    epobj = EP_log.objects.filter(
        Q(employeeID=self.employeeID)
        & Q(eval_date__gte=self.start_date)
        & Q(eval_date__lte=self.eval_date + datetime.timedelta(days=1)))
    if epobj.count() == 0:
        return 0
    avg = epobj.annotate(day=TruncDay("eval_date")) \
        .values("day") \
        .annotate(the_count=Count("employeeID")) \
        .aggregate(avg=Avg("the_count"))["avg"]
    self.ep_access_day_mean = avg
    return self.ep_access_day_mean
def chart_daily(self):
    return (
        PointIssued.objects.annotate(date=TruncDay("created_at"))
        .values("date")
        .annotate(y=Sum("point"))
        .order_by("-date")
    )
def changelist_view(self, request, extra_context=None):
    # Aggregate new users per day.
    chart_data = (
        UserMetric.objects.annotate(date=TruncDay("date_joined"))
        .values("date")
        .annotate(y=Count("id"))
        .order_by("-date")
    )
    # Serialize and attach the chart data to the template context,
    # merging with any context the caller supplied rather than discarding it.
    as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)
    extra_context = extra_context or {}
    extra_context["chart_data"] = as_json
    response = super().changelist_view(request, extra_context=extra_context)
    try:
        qs = response.context_data['cl'].queryset
    except (AttributeError, KeyError):
        return response
    # Count of all users.
    metrics = {
        'total': Count('id'),
    }
    response.context_data['summary_total'] = dict(qs.aggregate(**metrics))
    return response