def __init__(self, lhs, rhs):
    """Combine two datetime expressions with SQL subtraction (lhs - rhs).

    Forces the combined expression's output to a DurationField so the ORM
    hands back a timedelta rather than a backend-specific interval value.
    """
    # Py3-style zero-argument super(); this file already uses Py3-only
    # syntax (f-strings), so the explicit-argument form was just legacy.
    super().__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def stats(request):
    """Admin-only usage statistics page.

    Shows, per user, the summed session duration (logout - login) and the
    number of session rows, restricted to totals above two seconds, plus a
    count and list of currently logged-in users. Non-admins get a 403.
    """
    role_record = User_Role.objects.get(user_id=request.user.id)
    if role_record.role.type != 'A':
        return HttpResponseForbidden()

    # Database-side session length for each User_Stat row.
    session_length = ExpressionWrapper(
        F('logout_time') - F('login_time'),
        output_field=fields.DurationField(),
    )
    per_user = (
        User_Stat.objects
        .values('user_id')
        .annotate(duration=Sum(session_length))
        .annotate(dcount=Count('user_id'))
        .filter(duration__gt=timedelta(seconds=2))
        .order_by('user_id')
    )
    currently_online = User_Role.objects.filter(is_logged_in=True)
    context = {
        'stats': per_user,
        'total_online': currently_online.aggregate(Count('id')),
        'online_users': currently_online.values_list('user_id', flat=True),
    }
    return render(request, 'bms/stats.html', context)
def get(self, request, **kwargs):
    """Return an account's activity aggregated into fixed-length periods.

    Query parameters:
        startDate / endDate -- relative date strings (default '120d'/'0d'),
            converted via parse_relative_date.
        statsPeriod -- days per aggregation bucket; coerced to at least 1.

    Raises:
        ParseError (400) for malformed query parameters.
        Http404 when the slugged account does not exist or is not held by
        the requesting user.
    """
    try:
        start_date = request.GET.get('startDate', '120d')
        start_date = parse_relative_date(start_date)
    except ValueError:
        # start_date still holds the raw query string at this point.
        raise ParseError(detail=f'Invalid start date: {start_date}')
    try:
        end_date = request.GET.get('endDate', '0d')
        end_date = parse_relative_date(end_date)
    except ValueError:
        raise ParseError(detail=f'Invalid end date: {end_date}')
    # BUG FIX: the original read and converted in one expression, so when
    # int() raised, `stats_period` was never bound and the f-string below
    # died with a NameError instead of the intended ParseError. Read the
    # raw value first so the error message can reference it.
    stats_period_raw = request.GET.get('statsPeriod', '10')
    try:
        stats_period = max(int(stats_period_raw), 1)
    except ValueError:
        raise ParseError(detail=f'Invalid stats period: {stats_period_raw}')
    try:
        slug = kwargs['account_slug']
        account = Account.objects.get(holders__in=[request.user], slug=slug)
    except Account.DoesNotExist:
        raise Http404

    activities = AccountActivity.objects \
        .filter(account=account) \
        .filter(date__gte=start_date) \
        .filter(date__lte=end_date)

    # NOTE(review): with no activity in range, first_date is None and the
    # max() over an empty period set below raises — presumably callers
    # guarantee at least one activity; confirm.
    first_date = activities.aggregate(Min('date'))['date__min']
    # Bucket index: whole number of stats_period-day steps since first_date.
    period = ExpressionWrapper((F('date') - first_date) / stats_period,
                               output_field=fields.DurationField())
    # Calendar date at the start of each bucket.
    period_date = ExpressionWrapper(first_date + stats_period * F('period'),
                                    output_field=fields.DateField())
    activities = activities.annotate(period=period) \
        .order_by('period') \
        .values('period') \
        .annotate(
            period_date=period_date,
            period_balance=ArrayAgg('balance'),
            period_deposit=Sum('deposit'),
            period_withdrawl=Sum('withdrawl'),
        )

    # if there is a period of inactivity on the account, the series
    # will be missing data during those periods, so let's fill them
    activities = list(activities)
    periods = {activity['period'] for activity in activities}
    all_periods = set(range(max(periods) + 1))
    for period in sorted(all_periods - periods):
        # since we align the beginning of the activities to the
        # first activity within the defined date range, the 0th
        # period should always be defined
        assert period != 0
        # Gaps are filled with the previous bucket's closing balance and
        # zero deposit/withdrawal; insertion position == index because we
        # walk the missing periods in ascending order.
        previous = activities[period - 1]
        activities.insert(
            period, {
                'period': period,
                'period_date': previous['period_date'] + timedelta(days=stats_period),
                'period_balance': [previous['period_balance'][-1]],
                'period_deposit': 0.,
                'period_withdrawl': 0.,
            })

    # drop the extraneous balance data, we just want the final balance
    # at the end of each period
    for activity in activities:
        activity['period_balance'] = activity['period_balance'][-1]
        # Withdrawals are reported as negative amounts.
        activity['period_withdrawl'] = -activity['period_withdrawl']

    return Response(activities, status=status.HTTP_200_OK)
def calculate_distinct_sorted_leaderboard_data(
    user, challenge_obj, challenge_phase_split, only_public_entries, order_by
):
    """
    Function to calculate and return the sorted leaderboard data

    Arguments:
        user {[Class object]} -- User model object
        challenge_obj {[Class object]} -- Challenge model object
        challenge_phase_split {[Class object]} -- Challenge phase split model object
        only_public_entries {[Boolean]} -- Boolean value to determine if the user wants to include private entries or not
        order_by {[String]} -- Leaderboard label to rank by; used only if present in the schema's "labels", otherwise the schema's default_order_by applies

    Returns:
        [list] -- Ranked list of participant teams to be shown on leaderboard
        [status] -- HTTP status code (200/400)
    """
    # Get the leaderboard associated with the Challenge Phase Split
    leaderboard = challenge_phase_split.leaderboard

    # Get the default order by key to rank the entries on the leaderboard
    default_order_by = None
    is_leaderboard_order_descending = (
        challenge_phase_split.is_leaderboard_order_descending
    )
    try:
        default_order_by = leaderboard.schema["default_order_by"]
    except KeyError:
        response_data = {
            "error": "Sorry, default_order_by key is missing in leaderboard schema!"
        }
        return response_data, status.HTTP_400_BAD_REQUEST

    # Use order by field from request only if it is valid
    try:
        if order_by in leaderboard.schema["labels"]:
            default_order_by = order_by
    except KeyError:
        response_data = {
            "error": "Sorry, labels key is missing in leaderboard schema!"
        }
        return response_data, status.HTTP_400_BAD_REQUEST

    # Per-metric metadata can override the split-level sort direction:
    # sort_ascending == False means descending order for this metric.
    leaderboard_schema = leaderboard.schema
    if (
        leaderboard_schema.get("metadata") is not None
        and leaderboard_schema.get("metadata").get(default_order_by)
        is not None
    ):
        is_leaderboard_order_descending = (
            leaderboard_schema["metadata"][default_order_by].get(
                "sort_ascending"
            )
            is False
        )

    # Exclude the submissions done by members of the host team
    # while populating leaderboard
    challenge_hosts_emails = (
        challenge_obj.creator.get_all_challenge_host_email()
    )
    is_challenge_phase_public = challenge_phase_split.challenge_phase.is_public
    # Exclude the submissions from challenge host team to be displayed on the leaderboard of public phases
    challenge_hosts_emails = (
        [] if not is_challenge_phase_public else challenge_hosts_emails
    )

    challenge_host_user = is_user_a_host_of_challenge(user, challenge_obj.pk)

    all_banned_email_ids = challenge_obj.banned_email_ids

    # Check if challenge phase leaderboard is public for participant user or not
    if (
        challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC
        and not challenge_host_user
    ):
        response_data = {"error": "Sorry, the leaderboard is not public!"}
        return response_data, status.HTTP_400_BAD_REQUEST

    # Host submissions are dropped unless flagged as baselines, which stay
    # visible on the board.
    leaderboard_data = LeaderboardData.objects.exclude(
        Q(submission__created_by__email__in=challenge_hosts_emails)
        & Q(submission__is_baseline=False)
    )

    # Get all the successful submissions related to the challenge phase split
    all_valid_submission_status = [Submission.FINISHED]

    # Handle the case for challenges with partial submission evaluation feature
    if (
        challenge_phase_split.challenge_phase.is_partial_submission_evaluation_enabled
    ):
        all_valid_submission_status.append(Submission.PARTIALLY_EVALUATED)

    leaderboard_data = leaderboard_data.filter(
        challenge_phase_split=challenge_phase_split,
        submission__is_flagged=False,
        submission__status__in=all_valid_submission_status,
    ).order_by("-created_at")

    if challenge_phase_split.show_execution_time:
        # submission runtime = completed_at - started_at, exposed as a
        # DurationField annotation alongside the scores.
        time_diff_expression = ExpressionWrapper(
            F("submission__completed_at") - F("submission__started_at"),
            output_field=fields.DurationField(),
        )
        # filtering_score/filtering_error pull the ranked metric out of the
        # JSON "result"/"error" columns via Postgres ->> (RawSQL).
        leaderboard_data = leaderboard_data.annotate(
            filtering_score=RawSQL(
                "result->>%s", (default_order_by,), output_field=FloatField()
            ),
            filtering_error=RawSQL(
                "error->>%s",
                ("error_{0}".format(default_order_by),),
                output_field=FloatField(),
            ),
            submission__execution_time=time_diff_expression,
        ).values(
            "id",
            "submission__participant_team",
            "submission__participant_team__team_name",
            "submission__participant_team__team_url",
            "submission__is_baseline",
            "submission__is_public",
            "challenge_phase_split",
            "result",
            "error",
            "filtering_score",
            "filtering_error",
            "leaderboard__schema",
            "submission__submitted_at",
            "submission__method_name",
            "submission__id",
            "submission__submission_metadata",
            "submission__execution_time",
            "submission__is_verified_by_host",
        )
    else:
        # Same projection, minus the execution-time annotation.
        leaderboard_data = leaderboard_data.annotate(
            filtering_score=RawSQL(
                "result->>%s", (default_order_by,), output_field=FloatField()
            ),
            filtering_error=RawSQL(
                "error->>%s",
                ("error_{0}".format(default_order_by),),
                output_field=FloatField(),
            ),
        ).values(
            "id",
            "submission__participant_team",
            "submission__participant_team__team_name",
            "submission__participant_team__team_url",
            "submission__is_baseline",
            "submission__is_public",
            "challenge_phase_split",
            "result",
            "error",
            "filtering_score",
            "filtering_error",
            "leaderboard__schema",
            "submission__submitted_at",
            "submission__method_name",
            "submission__id",
            "submission__submission_metadata",
            "submission__is_verified_by_host",
        )

    if only_public_entries:
        if challenge_phase_split.visibility == ChallengePhaseSplit.PUBLIC:
            leaderboard_data = leaderboard_data.filter(
                submission__is_public=True
            )

    # Collect teams containing any banned participant, and default missing
    # score/error metrics to 0 so the sort key below never sees None.
    # NOTE(review): ParticipantTeam.objects.get inside this loop is an N+1
    # query pattern — consider prefetching if the board is large.
    all_banned_participant_team = []
    for leaderboard_item in leaderboard_data:
        participant_team_id = leaderboard_item["submission__participant_team"]
        participant_team = ParticipantTeam.objects.get(id=participant_team_id)
        all_participants_email_ids = (
            participant_team.get_all_participants_email()
        )
        for participant_email in all_participants_email_ids:
            if participant_email in all_banned_email_ids:
                all_banned_participant_team.append(participant_team_id)
                break
        if leaderboard_item["error"] is None:
            leaderboard_item.update(filtering_error=0)
        if leaderboard_item["filtering_score"] is None:
            leaderboard_item.update(filtering_score=0)

    if challenge_phase_split.show_leaderboard_by_latest_submission:
        # Already ordered by "-created_at" above, i.e. latest first.
        sorted_leaderboard_data = leaderboard_data
    else:
        # Rank by score, breaking ties by smaller error (hence the negation).
        sorted_leaderboard_data = sorted(
            leaderboard_data,
            key=lambda k: (
                float(k["filtering_score"]),
                float(-k["filtering_error"]),
            ),
            reverse=True if is_leaderboard_order_descending else False,
        )

    # Keep only each team's best (first) entry; baselines are always kept
    # and deliberately do not claim the team's slot in team_list.
    distinct_sorted_leaderboard_data = []
    team_list = []
    for data in sorted_leaderboard_data:
        if (
            data["submission__participant_team__team_name"] in team_list
            or data["submission__participant_team"]
            in all_banned_participant_team
        ):
            continue
        elif data["submission__is_baseline"] is True:
            distinct_sorted_leaderboard_data.append(data)
        else:
            distinct_sorted_leaderboard_data.append(data)
            team_list.append(data["submission__participant_team__team_name"])

    # Re-shape each item's result/error dicts into lists aligned with the
    # schema's label order for display.
    leaderboard_labels = challenge_phase_split.leaderboard.schema["labels"]
    for item in distinct_sorted_leaderboard_data:
        item_result = []
        for index in leaderboard_labels:
            # Handle case for partially evaluated submissions
            if index in item["result"].keys():
                item_result.append(item["result"][index])
            else:
                item_result.append("#")
        item["result"] = item_result
        if item["error"] is not None:
            item["error"] = [
                item["error"]["error_{0}".format(index)]
                for index in leaderboard_labels
            ]
    return distinct_sorted_leaderboard_data, status.HTTP_200_OK
def _load_library_statistics(self):
    """Assemble the statistics dict backing the library dashboard.

    Returns a nested dict covering: total/being-played/unchecked game counts,
    total requisitors, total withdraws, the 8 most recent check-outs and
    check-ins, the top-5 requisitors and games by withdraw count, summed
    min/max player counts for games currently out, and the average duration
    a game stays withdrawn.
    """
    # Load users ordered by number of withdraws
    users = [{
        "id": user.id,
        "name": f"{user.first_name} {user.last_name}",
        "count": user.num_withdraws,
    } for user in User.objects.annotate(
        num_withdraws=Count("withdraw")).order_by("-num_withdraws")][:5]

    # Load games order by number of withdraws
    all_popular_games = LibraryGame.objects.annotate(
        num_withdraws=Count("withdraw")).order_by("-num_withdraws")

    # Games currently checked out (no return date yet): summed player
    # bounds plus a row count in a single aggregate query.
    ongoing_withdraws = Withdraw.objects.filter(
        date_returned__isnull=True).aggregate(
            min_players=Sum("game__game__min_players"),
            max_players=Sum("game__game__max_players"),
            total=Count("*"),
        )

    # Database-side duration of each completed withdraw, averaged.
    duration = ExpressionWrapper(
        F("date_returned") - F("date_withdrawn"),
        output_field=fields.DurationField(),
    )
    average_duration = (Withdraw.objects.annotate(
        duration=duration).filter(date_returned__isnull=False).aggregate(
            average=Avg(duration)))

    popular_games = [{
        "id": game.id,
        "bggid": game.game.bggid,
        "game": f"{game.game.name}",
        "requisitions": game.num_withdraws,
    } for game in all_popular_games[:5]]

    return {
        "games": {
            "total": LibraryGame.objects.all().count(),
            "being_played": ongoing_withdraws.get("total"),
            # Games with no shelf location are considered not checked in.
            "not_checked_in":
                LibraryGame.objects.filter(location__isnull=True).count(),
        },
        "requisitors": {
            "total": User.objects.all().count(),
        },
        "withdraws": {
            "total": Withdraw.objects.all().count(),
            "recent": {
                # Last 8 games taken out / brought back.
                "out": [{
                    "game_id": w.game_id,
                    "bggid": w.game.game.bggid,
                    "game": w.game.game.name,
                    "image": w.game.game.image,
                    "date": w.date_withdrawn,
                } for w in Withdraw.objects.all().order_by(
                    "-date_withdrawn")[:8]],
                "in": [{
                    "game_id": w.game_id,
                    "bggid": w.game.game.bggid,
                    "image": w.game.game.image,
                    "game": w.game.game.name,
                    "date": w.date_returned,
                } for w in Withdraw.objects.all().order_by(
                    "-date_returned")[:8]],
            },
            "popular": {
                "requisitors": users,
                "games": popular_games
            },
            "players": {
                "min_players": ongoing_withdraws.get("min_players"),
                "max_players": ongoing_withdraws.get("max_players"),
            },
            "duration": average_duration,
        },
    }
def __init__(self, lhs, rhs):
    """Build an SQL subtraction (lhs - rhs) whose result is a duration."""
    result_field = fields.DurationField()
    super().__init__(lhs, self.SUB, rhs, output_field=result_field)
def candidate_list(request):
    """List candidates, optionally filtered, for allowed users and verified employers.

    POST applies (or clears) the FilterForm; GET pre-fills the form from the
    query string. A 'save_candidate_list' POST stores the filtered set and
    redirects to it. Verified employers only ever see fully-registered
    candidates (non-provisional registration numbers).
    """
    username = auth.get_user(request)
    verified_employer = is_verified_employer(username, request)
    allowed = is_allowed(username, request) or verified_employer
    if not allowed:
        return render(
            request,
            'ra/not_allowed.html',
        )

    # User is allowed to access page
    queryset = Candidate.objects.all()
    if request.method == 'POST':
        if 'clear_all_filters' in request.POST:
            filter_form = FilterForm()
        else:
            filter_form = FilterForm(request.POST)
            if filter_form.is_valid():
                name = filter_form.cleaned_data['name']
                if name is not None and len(name) > 0:
                    queryset = queryset.filter(name__icontains=name)
                date_of_birth = filter_form.cleaned_data['date_of_birth']
                if date_of_birth is not None:
                    queryset = queryset.filter(date_of_birth=date_of_birth)
                date_created = filter_form.cleaned_data['date_created']
                if date_created is not None:
                    queryset = queryset.filter(
                        candidate_username__date_joined__date=date_created)
                gender = filter_form.cleaned_data['gender']
                if gender is not None and len(gender) > 0:
                    queryset = queryset.filter(gender=gender)
                professional_qualifications = filter_form.cleaned_data[
                    'professional_qualifications']
                if professional_qualifications is not None and len(
                        professional_qualifications) > 0:
                    # Degree must match and the institute name must be
                    # non-empty for the qualification to count.
                    queryset = queryset.filter(
                        professionalqualifications__class_degree__iexact=
                        professional_qualifications).annotate(
                            institute_name_length=Length(
                                'professionalqualifications__institute_name')
                        ).filter(institute_name_length__gt=0)
                minimum_experience = filter_form.cleaned_data[
                    'minimum_experience']
                if minimum_experience is not None and len(
                        minimum_experience) > 0:
                    # Total experience = sum of (date_to - date_from) over
                    # the candidate's experience rows; the threshold treats
                    # one month as 30 days.
                    duration = ExpressionWrapper(
                        F('experience__date_to') - F('experience__date_from'),
                        output_field=fields.DurationField())
                    queryset = queryset.annotate(
                        experience_duration=duration).annotate(
                            total_experience=Sum('experience_duration'))
                    entered_xp_duration = datetime.timedelta(
                        days=int(minimum_experience) * 30)
                    queryset = queryset.filter(
                        total_experience__gt=entered_xp_duration)
                eligibility_tests = filter_form.cleaned_data[
                    'eligibility_tests']
                if eligibility_tests is not None and len(
                        eligibility_tests) > 0:
                    queryset = queryset.filter(
                        eligibilitytests__eligibility_tests__iexact=
                        eligibility_tests).annotate(
                            score_grade_marks_length=Length(
                                'eligibilitytests__score_grade_marks')).filter(
                                    score_grade_marks_length__gt=0)
                temp = filter_form.cleaned_data['verified']
                if temp is not None and len(temp) > 0:
                    # 'False'/'No' select candidates with a provisional
                    # registration number; anything else selects verified.
                    verified = temp not in ['False', 'No']
                    queryset = queryset.filter(
                        is_provisional_registration_number=verified)
        if 'save_candidate_list' in request.POST:
            name = 'Temporary candidate list: ' + str(
                timezone.datetime.now())
            candidate_list = save_candidate_list(name, queryset)
            return HttpResponseRedirect('/ra/save_list?list_id=' +
                                        str(candidate_list.list_id))
    else:
        gender = 'Any'
        experience = ''
        eligibility_tests = ''
        try:
            # Sequential lookups: a missing earlier key deliberately leaves
            # the later ones at their defaults (original behavior kept).
            gender = request.GET['gender']
            experience = request.GET['experience']
            eligibility_tests = request.GET['eligibility_tests']
        except KeyError:
            pass
        filter_form = FilterForm(
            initial={
                'gender': gender,
                'minimum_experience': experience,
                'eligibility_tests': eligibility_tests
            })

    if verified_employer:
        queryset = queryset.filter(is_provisional_registration_number=False)
    count = queryset.count()
    return render(
        request,
        'ra/candidate_list.html',
        {
            'queryset': queryset,
            'filter_form': filter_form,
            'count': count,
            'verified_employer': verified_employer,
        },
    )
# from test1.appointments.models import Appointment # from django.utils import timezone # from datetime import timedelta # from diagnosis.models import Diagnosis from django.db.models import Func, ExpressionWrapper, F, fields, Avg SomeModel.objects.annotate( duration=Func(F('end_date'), F('start_date'), function='age')) from datetime import timedelta #duration = ExpressionWrapper(F('closed_at') - F('opened_at'), output_field=fields.DurationField()) dur = Diagnosis.objects.annotate(duration=ExpressionWrapper( F('closed_time') - F('filed_time'), output_field=fields.DurationField())) dur.values('duration') dur.aggregate(Avg('duration')) Diagnosis.objects.values('duration') my_diag = Diagnosis.objects.filter(appointment__doctor__user_id=2).values() my_diag1 = my_diag.filter(d_type=1) dur1 = my_diag1.annotate( duration=ExpressionWrapper(F('filed_time') - F('appointment__appointment_time'), output_field=fields.DurationField())) from profiles.models import Patient, Doctor from datetime import date
def get(self, request, *args, **kwargs):
    """Render the board list page with two Bokeh bar charts.

    Graph 1: average rating per task name. Graph 2: total time spent per
    task name (date delta plus time delta, converted to seconds by the
    project's timedelta_to_seconds Func). The Bokeh script/div components
    are injected into the list view's context.
    """
    # graph 1: average rating per task
    tasks_set = BoardTask.objects.values("task_id__task_name")\
        .annotate(d_count=Count('task_id'), avg_rating=Avg('task_rating'))
    # Pivot list-of-dicts into a dict-of-columns for ColumnDataSource.
    # NOTE(review): tasks_set[0] raises IndexError when there are no tasks
    # — presumably the page is only reachable with data; confirm.
    v = {k: [dic[k] for dic in tasks_set] for k in tasks_set[0]}
    source1 = ColumnDataSource(data=v)
    p1 = figure(title="Simple line example", x_axis_label='x',
                y_axis_label='y', x_range=v['task_id__task_name'])
    p1.vbar(x='task_id__task_name', top='avg_rating', width=.9,
            line_color='white', color="#e84d60", legend_label="tasks",
            source=source1)
    p1.xaxis.major_label_orientation = 1

    # graph 2: summed duration per task
    duration = ExpressionWrapper(
        (F("task_end_time") - F("task_start_time")) +
        (F("task_end_date") - F("task_start_date")),
        output_field=fields.DurationField())
    # Fixed a dangling "\" line continuation that ran this statement into
    # the following assignment; also dropped a leftover debug print(v).
    data = BoardTask.objects.annotate(duration=duration) \
        .annotate(duration=timedelta_to_seconds(F("duration"))) \
        .values("task_id__task_name") \
        .annotate(sum_duration=Sum('duration'))
    v = {k: [dic[k] for dic in data] for k in data[0]}
    source2 = ColumnDataSource(data=v)
    p2 = figure(title="Simple line example", x_axis_label='x',
                y_axis_label='y', x_range=v['task_id__task_name'])
    p2.vbar(x='task_id__task_name', top='sum_duration', width=.9,
            line_color='white', color="#e84d60", legend_label="tasks",
            source=source2)
    p2.xaxis.major_label_orientation = 1

    # Store components
    script, div = components(row(p1, p2))
    self.object_list = self.get_queryset()
    context = self.get_context_data(object_list=self.object_list,
                                    script=script, div=div)
    return self.render_to_response(context)
def _avg_duration_minutes(diagnoses):
    """Average 'duration' annotation of a diagnosis queryset, in minutes.

    Returns 0 for an empty queryset. Mirrors the original arithmetic:
    int(total_seconds()) / 60, which yields a float.
    """
    if diagnoses:
        return int(
            diagnoses.aggregate(
                Avg('duration'))['duration__avg'].total_seconds()) / 60
    return 0


def countPatient(request):
    """Build the counters for the doctor's data-analysis page.

    Populates: male/female and total appointment counts per access level
    ('male1'..'appt2'), diagnosis counts per type ('type1'..'type3'),
    average wait minutes per type/access combination ('w1'..'w6'), per-type
    sums ('s1'..'s3'), and per-type means over the type counts ('a1'..'a3').
    """
    user = request.user
    counts = {}
    d = Doctor.objects.get(user_id=user.id)
    my_appt1 = d.appointment_doctor.filter(appt_access=1)
    my_appt2 = d.appointment_doctor.filter(appt_access=2)
    counts['male1'] = my_appt1.filter(patient__sex=1).count()
    counts['male2'] = my_appt2.filter(patient__sex=1).count()
    counts['female1'] = my_appt1.filter(patient__sex=2).count()
    counts['female2'] = my_appt2.filter(patient__sex=2).count()
    counts['appt1'] = my_appt1.count()
    counts['appt2'] = my_appt2.count()

    my_diag = Diagnosis.objects.filter(
        appointment__doctor__user_id=user.id).values()
    counts["type1"] = my_diag.filter(d_type=1).count()
    counts["type2"] = my_diag.filter(d_type=2).count()
    counts["type3"] = my_diag.filter(d_type=3).count()

    # Wait time: filed_time minus the appointment's scheduled time.
    my_diag = my_diag.annotate(
        duration=ExpressionWrapper(
            F('filed_time') - F('appointment__appointment_time'),
            output_field=fields.DurationField()))

    # w1..w3: average wait (minutes) for d_type 1..3 with appt_access=1;
    # w4..w6: the same for appt_access=2. Replaces six copy-pasted blocks.
    combos = [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2)]
    for idx, (d_type, access) in enumerate(combos, start=1):
        subset = my_diag.filter(
            Q(d_type=d_type) & Q(appointment__appt_access=access))
        counts['w%d' % idx] = _avg_duration_minutes(subset)

    counts['s1'] = counts['w1'] + counts['w4']
    counts['s2'] = counts['w2'] + counts['w5']
    counts['s3'] = counts['w3'] + counts['w6']

    # Mean wait per diagnosis type; 0 when there are no diagnoses of a type.
    for i in (1, 2, 3):
        type_count = counts['type%d' % i]
        counts['a%d' % i] = counts['s%d' % i] / type_count if type_count else 0

    return render(request, 'reports/data_analysis.html', counts)
def _front_page(paging_size=settings.PAGING_SIZE, page=0, add_filter={}, add_q=[], as_of=None, days_back=50):
    # Hacker-News-style ranking: formula = (points - 1) / (age_hours + 2.1)^1.8,
    # computed per database vendor because duration arithmetic differs.
    # NOTE(review): add_filter={} and add_q=[] are mutable default arguments
    # (neither is mutated here, and add_q is never used at all — confirm
    # whether it can be removed).
    # TODO: weighting https://medium.com/hacking-and-gonzo/how-hacker-news-ranking-algorithm-works-1d9b0cf2c08d
    # (P-1) / (T+2)^G
    if as_of is None:
        now = timezone.now()
    else:
        now = as_of

    if connection.vendor == 'postgresql':
        now_value = Value(now, output_field=fields.DateTimeField())
        # Age as an interval; Extract(..., 'epoch') turns it into seconds.
        submission_age_float = ExpressionWrapper(
            (now_value - F('created_at')), output_field=fields.DurationField())
        submission_age_hours = ExpressionWrapper(
            Extract(F('tf'), 'epoch') / 60 / 60 + 2.1,
            output_field=fields.FloatField())
        real_p = ExpressionWrapper(F('points') - 1,
                                   output_field=fields.FloatField())
        # +0.001 guards against division by zero.
        formula = ExpressionWrapper(
            F('p') / (Power(F('tfh'), F('g')) + 0.001),
            output_field=fields.FloatField())
        return Story.objects.select_related('user')\
            .filter(duplicate_of__isnull=True)\
            .filter(points__gte=1) \
            .filter(created_at__gte=now - datetime.timedelta(days=days_back)) \
            .filter(created_at__lte=now) \
            .filter(**add_filter) \
            .annotate(tf=submission_age_float) \
            .annotate(tfh=submission_age_hours) \
            .annotate(p=real_p) \
            .annotate(g=Value(1.8, output_field=fields.FloatField())) \
            .annotate(formula=formula) \
            .order_by('-formula')[(page*paging_size):(page+1)*(paging_size)]
    elif connection.vendor == 'sqlite':
        now_value = Value(now, output_field=fields.DateTimeField())
        # SQLite has no interval type: the difference comes back as a
        # number (microseconds, per the /1000000 below — TODO confirm),
        # so the age is treated as a float directly.
        submission_age_float = ExpressionWrapper(
            (now_value - F('created_at')), output_field=fields.FloatField())
        submission_age_hours = ExpressionWrapper(
            F('tf') / 60 / 60 / 1000000 + 2.1,
            output_field=fields.FloatField())
        real_p = ExpressionWrapper(F('points') - 1,
                                   output_field=fields.FloatField())
        formula = ExpressionWrapper(
            F('p') / (Power(F('tfh'), F('g')) + 0.001),
            output_field=fields.FloatField())
        return Story.objects.select_related('user')\
            .filter(duplicate_of__isnull=True)\
            .filter(points__gte=1) \
            .filter(created_at__gte=now - datetime.timedelta(days=days_back)) \
            .filter(created_at__lte=now) \
            .filter(**add_filter) \
            .annotate(tf=submission_age_float) \
            .annotate(tfh=submission_age_hours) \
            .annotate(p=real_p) \
            .annotate(g=Value(1.8, output_field=fields.FloatField())) \
            .annotate(formula=formula) \
            .order_by('-formula')[(page*paging_size):(page+1)*(paging_size)]
    else:
        raise NotImplementedError("No frontpage magic for database engine %s implemented"%(connection.vendor))
def queryset(self, request, queryset):
    """Admin list filter: when active, keep only rows whose field equals the
    per-group minimum (i.e. zero time past the minimum).

    With no filter value selected, the queryset is returned unchanged.
    """
    if not self.value():
        flt = {}
    else:
        # NOTE(review): field_name and distinct come from an enclosing
        # scope (module or class attribute) not visible in this block.
        # Renamed from `max`, which shadowed the builtin.
        time_past_min = ExpressionWrapper(
            F(field_name) - Min('{}__{}'.format(distinct, field_name)),
            output_field=fields.DurationField())
        queryset = queryset.annotate(max_time=time_past_min)
        flt = {'max_time': timedelta(hours=0)}
    return queryset.filter(**flt)
def es_update(**kwargs):
    """Propagate fresh num_views/num_comments from Documents to an ES index.

    Selects documents whose parsed-activity timestamp is at least one minute
    newer than their last ES sync (or never synced), then bulk-updates every
    matching topic document in kwargs['index']. Returns a short status string.
    """
    # Task-local imports — presumably so the task body is self-contained
    # for the task runner (e.g. Celery/Airflow); confirm.
    import datetime
    from elasticsearch_dsl import Search
    from elasticsearch.helpers import parallel_bulk
    from django.db.models import F, ExpressionWrapper, fields, Q
    from mainapp.models import Document
    from nlpmonitor.settings import ES_CLIENT, ES_INDEX_DOCUMENT
    import logging
    # Silence per-request elasticsearch chatter; errors still surface.
    es_logger = logging.getLogger('elasticsearch')
    es_logger.setLevel(logging.ERROR)
    # Init
    index = kwargs['index']
    print("!!!", "Getting documents to update", datetime.datetime.now())
    qs = Document.objects.exclude(Q(num_views=None) & Q(num_comments=None))
    qs = qs.only('id', 'num_views', 'num_comments', 'datetime_activity_parsed', 'datetime_activity_es_updated')
    # How much newer the parsed activity is than the last ES sync.
    qs = qs.annotate(timedelta_parsed_to_updated=ExpressionWrapper(
        F('datetime_activity_parsed') - F('datetime_activity_es_updated'),
        output_field=fields.DurationField()))
    qs = qs.filter(
        Q(timedelta_parsed_to_updated__gte=datetime.timedelta(minutes=1)) | Q(datetime_activity_es_updated=None))
    number_of_documents = qs.count()
    if number_of_documents == 0:
        return "Nothing to update"
    print("!!!", "Start updating ES index", number_of_documents, "docs to update", datetime.datetime.now())

    def activity_update_generator():
        # Yields parallel_bulk update actions: for each DB document, find
        # its ES doc(s) by id, then every topic doc referencing that ES id.
        updated = 0
        docs_processed = 0
        for doc in qs:
            update_body = {}
            if doc.num_views is not None:
                update_body['num_views'] = doc.num_views
            if doc.num_comments is not None:
                update_body['num_comments'] = doc.num_comments
            s = Search(using=ES_CLIENT, index=ES_INDEX_DOCUMENT).filter("term", id=doc.id)[:100]
            _ids = (hit.meta.id for hit in s.execute())
            for _id in _ids:
                s = Search(using=ES_CLIENT, index=index).filter("term", document_es_id=_id)[:100]
                _td_ids = (hit.meta.id for hit in s.execute())
                for _td_id in _td_ids:
                    updated += 1
                    yield {
                        "_index": index,
                        "_op_type": "update",
                        "_id": _td_id,
                        "doc": update_body,
                    }
            docs_processed += 1
            if docs_processed != 0 and docs_processed % 1000 == 0:
                print(f"{docs_processed}/{number_of_documents} processed", datetime.datetime.now())

    success = 0
    failed = 0
    # Abort once more than 5 bulk actions have failed.
    for ok, result in parallel_bulk(ES_CLIENT, activity_update_generator(), index=index, chunk_size=1000, raise_on_error=True, thread_count=2):
        if not ok:
            failed += 1
        else:
            success += 1
        if success % 1000 == 0:
            print(f"{success} es docs updated, {datetime.datetime.now()}")
        if failed > 5:
            raise Exception("Too many failed ES!!!")
    return f"{success} docs updated"
def get(self, request, *args, **kwargs):
    """Render the time-log report between report_start and report_end.

    Context: per-user unreconciled pay totals (pay_summary) with the
    underlying rows (detail_pay), and per-user/reason summed durations for
    non-billable time (other_summary) with the underlying rows (other).
    Unauthenticated users get the home page.
    """
    if not request.user.is_authenticated:
        return render(request, "home.html", {})

    # Parse the URL kwargs once instead of re-parsing the same strings for
    # every queryset (the original called strptime six times).
    report_window = [
        dt.datetime.strptime(kwargs['report_start'], '%Y-%m-%d'),
        dt.datetime.strptime(kwargs['report_end'], '%Y-%m-%d'),
    ]

    pay_summary = TimeLog.objects.filter(timestamp__range=report_window)\
        .order_by('user__last_name')\
        .filter(reconciled=False)\
        .filter(pay__gt=0)\
        .values('user__username')\
        .annotate(pay=Sum('pay'))\
        .annotate(fullname=Concat('user__last_name', Value(', '),
                                  'user__first_name'))
    detail_pay = TimeLog.objects.filter(timestamp__range=report_window)\
        .order_by('user__last_name')\
        .filter(reconciled=False)\
        .filter(pay__gt=0)

    # Worked time per row, summed per user/reason below.
    duration = ExpressionWrapper((F('work_end_time') - F('work_start_time')),
                                 output_field=fields.DurationField())
    other_summary = TimeLog.objects.filter(timestamp__range=report_window)\
        .order_by('user__last_name')\
        .filter(reconciled=False)\
        .filter(reason__billable=False)\
        .values('user__username', 'reason__reason')\
        .annotate(time=Sum(duration))\
        .annotate(fullname=Concat('user__last_name', Value(', '),
                                  'user__first_name'))
    other = TimeLog.objects.filter(timestamp__range=report_window)\
        .order_by('user__last_name')\
        .filter(reconciled=False)\
        .filter(reason__billable=False)

    return render(request, "timelog/report.html",
                  {'pay_summary': pay_summary,
                   'detail_pay': detail_pay,
                   'other_summary': other_summary,
                   'other': other,
                   'start_date': kwargs['report_start'],
                   'end_date': kwargs['report_end']})
def flight_list_index(request):
    """AJAX flight search: return matching outbound (and optional inbound)
    flights annotated with flight duration split into hours and minutes.

    SECURITY NOTE(review): the plaintext password is read back from the
    session and re-authenticated on every request. Storing credentials in
    the session is unsafe — prefer checking request.user.is_authenticated.
    """
    username = request.session.get('username')
    password = request.session.get('password')
    user = authenticate(request, username=username, password=password)
    if user is None:
        return render(request, 'login.html')

    # Request data retrieved from ajax call
    return_flt = request.POST.get('return_flt') == "true"
    flights_leaving_from = request.POST.get('from')
    flights_going_to = request.POST.get('to')
    form_outbound_date = datetime.strptime(
        request.POST.get('outbound_date'), "%m/%d/%Y").date()
    adult_pax = int(request.POST.get('adult_pax'))
    kids_pax = int(request.POST.get('kids_pax'))
    infant_pax = int(request.POST.get('infant_pax'))
    total_pax = adult_pax + kids_pax + infant_pax
    adult_chck = (adult_pax > 0)
    kids_chck = (kids_pax > 0)
    infant_chck = (infant_pax > 0)

    # Hoisted: the original rebuilt this identical expression four times.
    flight_duration = ExpressionWrapper(
        F('arr_date') - F('dep_date'),
        output_field=fields.DurationField())

    outbound_flights = BookingFlights.objects.filter(
        orig=flights_leaving_from,
        dest=flights_going_to,
        dep_date__date=form_outbound_date,
    ).annotate(flight_hrs=ExtractHour(flight_duration),
               flight_mins=ExtractMinute(flight_duration))

    context = {
        'flights_out': outbound_flights,
        'adult_pax': adult_pax,
        'kids_pax': kids_pax,
        'infant_pax': infant_pax,
        'adult_chck': adult_chck,
        'kids_chck': kids_chck,
        'infant_chck': infant_chck,
        'total_pax': total_pax,
        'return_flt': return_flt,
    }
    if return_flt:
        form_inbound_date = datetime.strptime(
            request.POST.get('inbound_date'), "%m/%d/%Y").date()
        context['flights_in'] = BookingFlights.objects.filter(
            orig=flights_going_to,
            dest=flights_leaving_from,
            dep_date__date=form_inbound_date,
        ).annotate(flight_hrs=ExtractHour(flight_duration),
                   flight_mins=ExtractMinute(flight_duration))
    return render(request, 'flight-list-view.html', context)