def compare_aggregate(test_id_1, test_id_2):
    """Return comparison data for all actions present in both tests.

    For each action aggregated in ``test_id_1`` that also has aggregate
    data in ``test_id_2``, build one row pairing the metrics of both
    tests as ``<metric>_1`` / ``<metric>_2``.

    :param test_id_1: id of the baseline test
    :param test_id_2: id of the test to compare against
    :return: list of dicts, one per action found in both tests
    """
    compare_data = []
    action_data_1 = TestActionAggregateData.objects.annotate(
        action_name=F('action__url')).filter(test_id=test_id_1).values(
        'action_id', 'action_name', 'data',)
    for action in action_data_1:
        action_id = action['action_id']
        # Single query with .first() instead of exists() + [0]: avoids a
        # second round trip and a race between the check and the fetch.
        action_data_2 = TestActionAggregateData.objects.annotate(
            action_name=F('action__url')).filter(
            action_id=action_id, test_id=test_id_2).values(
            'action_id', 'action_name', 'data').first()
        if action_data_2 is None:
            continue
        data_1 = action['data']
        data_2 = action_data_2['data']
        row = {'action_name': action['action_name']}
        # Pair each metric of both tests; keys in `data` use the raw
        # percentile labels ('50%', '90%') while output uses p50/p90.
        for out_key, data_key in (('mean', 'mean'), ('p50', '50%'),
                                  ('p90', '90%'), ('count', 'count'),
                                  ('max', 'max'), ('min', 'min'),
                                  ('errors', 'errors')):
            row['{}_1'.format(out_key)] = data_1[data_key]
            row['{}_2'.format(out_key)] = data_2[data_key]
        compare_data.append(row)
    return compare_data
def __call__(self, request):
    """Middleware hook: track authenticated users' activity.

    When the session's last recorded activity is missing or older than
    one hour, bump the profile's ``login_last``/``login_count``, then
    stamp the current time into the session before continuing the chain.
    """
    if request.user.is_authenticated:
        stale_before = timezone.now() - td(seconds=60 * 60)
        last_seen = request.session.get('last-activity')
        if not last_seen or parse(last_seen) < stale_before:
            # F() keeps the counter increment atomic in the database.
            UserProfile.objects.filter(user=request.user).update(
                login_last=timezone.now(),
                login_count=F('login_count') + 1)
        request.session['last-activity'] = timezone.now().isoformat()
    return self.get_response(request)
def _load_period_ipstat(fill_date: date, period: timedelta):
    """Build per-IP Lead session statistics for the window ending on *fill_date*.

    The window spans whole days: from midnight of ``fill_date - period``
    through 23:59:59.999999 of ``fill_date``. Returns a values-queryset
    grouped by (ip_addr, geo, country, postal code, provider) annotated with:
      s_cnt    - number of sessions
      s_time   - summed session duration: (created, falling back to
                 last_event_time when created is NULL) - session_started
      s0_cnt   - sessions where ``created`` is NULL
      s_beg    - average session start time-of-day
      user_ids - distinct user ids owning the pixel's project
      cnt_dev  - count of device ids  # NOTE(review): not distinct —
                 confirm duplicates are intended here
    """
    # Last microsecond of the fill date, made timezone-aware.
    end_time = timezone.make_aware(
        time(hour=23, minute=59, second=59, microsecond=999999))
    end_date = datetime.combine(fill_date, end_time)
    # Midnight at the start of the period.
    start_date = (end_date - period).replace(hour=0, minute=0, second=0,
                                             microsecond=0)
    return Lead.objects \
        .filter(session_started__range=(start_date, end_date)) \
        .filter(ip_addr__isnull=False) \
        .values('ip_addr', 'geo', 'geo__country', 'geo__postal_code',
                'provider') \
        .annotate(s_cnt=Count('id')) \
        .annotate(s_time=Sum(ExpressionWrapper(
            Coalesce('created', 'last_event_time') - F('session_started'),
            output_field=DurationField()))) \
        .annotate(s0_cnt=Count(Case(
            When(created__isnull=True, then=F('id')),
            default=None,
            output_field=UUIDField()))) \
        .annotate(s_beg=Cast(Avg(
            Cast(F('session_started'), output_field=TimeField())
        ), output_field=TimeField())) \
        .annotate(user_ids=ArrayAgg('pixel__project__user__id',
                                    distinct=True)) \
        .annotate(cnt_dev=Count('device_id'))
def get_context_data(self, **kwargs):
    """Add the page's comment list, with vote/revision annotations, to the context."""
    context = super(CommentedDetailView, self).get_context_data(**kwargs)
    comments = Comment.objects.filter(page=self.get_comment_page())
    context['has_comments'] = comments.exists()
    comments = (comments.select_related('author__user')
                        .defer('author__about')
                        .annotate(revisions=Count('versions')))
    if self.request.user.is_authenticated:
        # Attach the requesting user's vote score via a raw left join.
        comments = comments.annotate(
            vote_score=Coalesce(RawSQLColumn(CommentVote, 'score'), Value(0)))
        profile = self.request.user.profile
        unique_together_left_join(
            comments, CommentVote, 'comment', 'voter', profile.id)
        # "New" users are non-staff with no full-score submissions.
        context['is_new_user'] = (
            not self.request.user.is_staff and
            not profile.submission_set.filter(
                points=F('problem__points')).exists())
    context['comment_list'] = comments
    return context
def test_alias_change_in_annotation(self):
    """A CTE annotation alias can be redefined in a second CTE.

    Builds a recursive region CTE with ``value=name``, then a second CTE
    redefining ``value`` as ``name || name``; correlating both via Exists
    must yield no rows. (Leftover debug print removed.)
    """
    def make_regions_cte(cte):
        # Recursive CTE rooted at the children of "sun".
        return Region.objects.filter(
            parent__name="sun",
        ).annotate(
            value=F('name'),
        ).union(
            cte.join(
                Region.objects.all().annotate(
                    value=F('name'),
                ),
                parent_id=cte.col.name,
            ),
            all=True,
        )
    cte = With.recursive(make_regions_cte)
    query = cte.queryset().with_cte(cte)
    # Second CTE with a *different* expression behind the same alias.
    exclude_leaves = With(cte.queryset().filter(
        parent__name='sun',
    ).annotate(
        value=Concat(F('name'), F('name'))
    ), name='value_cte')
    query = query.annotate(
        _exclude_leaves=Exists(
            exclude_leaves.queryset().filter(
                name=OuterRef("name"),
                value=OuterRef("value"),
            )
        )
    ).filter(_exclude_leaves=True).with_cte(exclude_leaves)
    # Nothing should be returned.
    self.assertFalse(query)
def test_order_of_operations(self):
    """Multiplication binds tighter than addition in F() arithmetic."""
    self.company_query.update(
        num_chairs=F('num_employees') + 2 * F('num_employees')
    )
    # chairs = employees + (2 * employees) = 3 * employees
    expected = [
        {'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300},
        {'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3},
        {'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32},
    ]
    self.assertSequenceEqual(self.company_query, expected)
def test_expressions_in_lookups_join_choice(self):
    """F()-based range lookups use INNER joins in filter(), LOUTER in exclude()."""
    midpoint = datetime.time(13, 0)
    t1 = Time.objects.create(time=datetime.time(12, 0))
    t2 = Time.objects.create(time=datetime.time(14, 0))
    # Runs with every combination of present/missing endpoints.
    for start, end in ((t1, t2), (t1, None), (None, t2), (None, None)):
        SimulationRun.objects.create(start=start, end=end, midpoint=midpoint)

    bounds = [F('start__time'), F('end__time')]
    queryset = SimulationRun.objects.filter(midpoint__range=bounds)
    self.assertQuerysetEqual(
        queryset,
        ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'],
        ordered=False
    )
    for alias in queryset.query.alias_map.values():
        if isinstance(alias, Join):
            self.assertEqual(alias.join_type, constants.INNER)

    queryset = SimulationRun.objects.exclude(midpoint__range=bounds)
    self.assertQuerysetEqual(queryset, [], ordered=False)
    for alias in queryset.query.alias_map.values():
        if isinstance(alias, Join):
            self.assertEqual(alias.join_type, constants.LOUTER)
def test_object_update_fk(self):
    """F() expressions can't be assigned to FK attributes or joined fields."""
    test_gmbh = Company.objects.get(name="Test GmbH")

    def assign_fk():
        test_gmbh.point_of_contact = F("ceo")

    # Assigning an F() to a ForeignKey attribute raises immediately.
    with self.assertRaises(ValueError):
        assign_fk()

    # Assigning a concrete related object is fine.
    test_gmbh.point_of_contact = test_gmbh.ceo
    test_gmbh.save()

    # A joined lookup is only rejected at save() time.
    test_gmbh.name = F("ceo__last_name")
    with self.assertRaises(FieldError):
        test_gmbh.save()
def calculate_people_ch(self):
    """Recalculate cohort membership in ClickHouse, tracking failures.

    On success resets the error counter and stamps the calculation time;
    on failure bumps ``errors_calculating`` atomically and reports the
    exception. The ``is_calculating`` flag is cleared and the model saved
    exactly once, in ``finally`` (matching the sibling implementation),
    instead of duplicating those lines in both branches.
    """
    if is_clickhouse_enabled():
        from ee.clickhouse.models.cohort import recalculate_cohortpeople
        try:
            recalculate_cohortpeople(self)
            self.last_calculation = timezone.now()
            self.errors_calculating = 0
        except Exception as err:
            # F() keeps the increment atomic under concurrent updates.
            self.errors_calculating = F("errors_calculating") + 1
            capture_exception(err)
        finally:
            self.is_calculating = False
            self.save()
def test_parenthesis_priority(self):
    """Parentheses override the default F() operator precedence."""
    self.company_query.update(
        num_chairs=((F('num_employees') + 2) * F('num_employees'))
    )
    # chairs = (employees + 2) * employees
    expected = [
        {'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300},
        {'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3},
        {'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32},
    ]
    self.assertSequenceEqual(self.company_query, expected)
def test_update_with_fk(self):
    """update() can copy one ForeignKey's value into another FK column.

    Uses the builtin ``str`` instead of the obsolete Python 2 shim
    ``six.text_type`` (identical on Python 3).
    """
    self.assertEqual(
        Company.objects.update(point_of_contact=F('ceo')), 3
    )
    self.assertQuerysetEqual(
        Company.objects.all(), [
            "Joe Smith",
            "Frank Meyer",
            "Max Mustermann",
        ],
        lambda c: str(c.point_of_contact),
        ordered=False
    )
def test_time_subtraction(self):
    """Subtracting a time Value from a TimeField yields a DurationField delta."""
    # Microsecond precision depends on the backend; adjust the fixture
    # and expectation accordingly. (Locals renamed to avoid shadowing
    # the datetime module's attribute names.)
    if connection.features.supports_microsecond_precision:
        sample_time = datetime.time(12, 30, 15, 2345)
        expected = datetime.timedelta(
            hours=1, minutes=15, seconds=15, microseconds=2345)
    else:
        sample_time = datetime.time(12, 30, 15)
        expected = datetime.timedelta(hours=1, minutes=15, seconds=15)
    Time.objects.create(time=sample_time)
    difference = ExpressionWrapper(
        F('time') - Value(datetime.time(11, 15, 0),
                          output_field=models.TimeField()),
        output_field=models.DurationField(),
    )
    queryset = Time.objects.annotate(difference=difference)
    self.assertEqual(queryset.get().difference, expected)
def tests_compare_report(request, test_id_1, test_id_2):
    """Render a comparison report between two tests.

    Classifies actions whose average response time regressed (negatives)
    or improved (positives) by more than ``reasonable_percent``, and lists
    actions present in test 2 but absent from test 1. Removed the unused
    ``reasonable_abs_diff`` local.
    """
    data = Aggregate.objects.raw(
        """
        SELECT a.url as "id",
        a1.average as "average_1",
        a2.average as "average_2",
        a1.average - a2.average as "avg_diff",
        (((a1.average-a2.average)/a2.average)*100) as "avg_diff_percent",
        a1.median - a2.median as "median_diff",
        (((a1.median-a2.median)/a2.median)*100) as "median_diff_percent"
        FROM
        (SELECT action_id, average, median FROM jltom.aggregate
        WHERE test_id = %s) a1,
        (SELECT action_id, average, median FROM jltom.aggregate
        WHERE test_id = %s) a2,
        jltom.action a
        WHERE a1.action_id = a2.action_id and a.id = a1.action_id
        """, [test_id_1, test_id_2])
    reasonable_percent = 3  # +/- threshold for a significant avg change
    negatives = []
    positives = []
    absense = []
    for row in data:
        if row.avg_diff_percent > reasonable_percent:
            negatives.append(row)
        elif row.avg_diff_percent < -reasonable_percent:
            positives.append(row)
    test_1_actions = list(Aggregate.objects
                          .annotate(url=F('action__url'))
                          .filter(test_id=test_id_1).values('url'))
    test_2_actions = list(Aggregate.objects
                          .annotate(url=F('action__url'))
                          .filter(test_id=test_id_2).values('url'))
    for url in test_2_actions:
        if url not in test_1_actions:
            absense.append(url)
    # NOTE: template key 'absense' kept as-is — the template depends on it.
    return render(request, 'compare_report.html', {
        'negatives': negatives,
        'positives': positives,
        'absense': absense
    })
def test_f_reuse(self):
    """A single F() instance can be reused across unrelated querysets."""
    pk_ref = F('id')
    number = Number.objects.create(integer=-1)
    company = Company.objects.create(
        name="Example Inc.", num_employees=2300, num_chairs=5,
        ceo=Employee.objects.create(firstname="Joe", lastname="Smith"))
    company_qs = Company.objects.filter(id=pk_ref)
    self.assertEqual(company_qs.get(), company)
    # The same F-object works against a different model...
    number_qs = Number.objects.filter(id=pk_ref)
    self.assertEqual(number_qs.get(), number)
    # ...without corrupting the first queryset.
    self.assertEqual(company_qs.get(), company)
def telegram_to_reading(data: str) -> DsmrReading:
    """Convert a P1 telegram into a DsmrReading destined for the database.

    Raises InvalidTelegramError (chained to the underlying parser error)
    when the telegram fails checksum or parsing; in that case the global
    rejected-telegram counter is bumped first.
    """
    connection_params = get_dsmr_connection_parameters()
    telegram_parser = TelegramParser(connection_params['specifications'])
    logger.debug("Received telegram:\n%s", data)

    try:
        parsed_telegram = telegram_parser.parse(data)
    except (InvalidChecksumError, ParseError) as error:
        # Hook to keep track of failed readings count.
        MeterStatistics.objects.all().update(
            rejected_telegrams=F('rejected_telegrams') + 1)
        logger.warning('Rejected telegram: %s', error)
        raise InvalidTelegramError(error) from error

    return _map_telegram_to_model(parsed_telegram=parsed_telegram, data=data)
def populate_users_list(self):
    """Fill the list widget with inactive users, or hide the section when empty."""
    inactive_users = User.objects.filter(is_active=False)
    self.__update_label(inactive_users)
    if not inactive_users.exists():
        # No pending users: collapse the whole section.
        self._list.hide()
        self._edit.hide()
        self._approve.hide()
        self._remove.hide()
        return
    self._list.value = inactive_users.annotate(
        email_confirmed=F("emailaddress__verified")
    )
    self.__show_actions()
    self.__disable_actions()
def overall_status(self):
    """Collapse the family's subtask statuses into one overall state.

    ``celery.chord_unlock`` never writes to the result backend, so it is
    excluded from consideration.
    """
    subtasks = (
        self.family.exclude(type='celery.chord_unlock')
        .annotate(_status=F('result__status'))
        # distinct() on _status values requires ordering by '_status'.
        .order_by('_status')
    )
    statuses = subtasks.values_list('_status', flat=True).distinct()
    if len(statuses) > 1:
        # Mixed states: any failure wins, otherwise work is in progress.
        return 'FAILURE' if 'FAILURE' in statuses else 'STARTED'
    # All statuses may be None, which we report as 'PENDING'.
    return statuses[0] or 'PENDING'
def calculate_people_ch(self):
    """Recalculate cohort membership in ClickHouse and persist the outcome.

    Success resets the error counter and stamps the calculation time;
    failure bumps ``errors_calculating`` atomically and re-raises. Either
    way the calculating flag is cleared and the model saved once.
    """
    if not is_clickhouse_enabled():
        return
    from ee.clickhouse.models.cohort import recalculate_cohortpeople
    from posthog.tasks.calculate_cohort import calculate_cohort
    try:
        recalculate_cohortpeople(self)
        calculate_cohort(self.id)
        self.last_calculation = timezone.now()
        self.errors_calculating = 0
    except Exception as exc:
        # Atomic DB-side increment of the failure counter.
        self.errors_calculating = F("errors_calculating") + 1
        raise exc
    finally:
        self.is_calculating = False
        self.save()
def get_formset(self, request, obj=None, **kwargs):
    """Limit the company_license field to selectable licenses.

    When editing an existing user, the queryset offers only active
    licenses of the user's company, excluding licenses that are both
    at capacity AND not already held by this user — i.e. a full license
    the user already holds remains selectable.
    """
    formset = super(UserApplicationInline, self).get_formset(request, obj, **kwargs)
    if obj:
        formset.form.declared_fields['company_license'].queryset = (
            CompanyApplicationLicense.objects.annotate(
                num_users=Count('userapplicationlicense')
            ).filter(
                active=True, company=obj.company
            ).exclude(
                # Both conditions must hold for a row to be excluded:
                # (1) the license is NOT held by this user, and
                # (2) the license has reached its seat limit.
                ~Q(userapplicationlicense__user=obj),
                license__max_users__lte=F('num_users'),
            )
        )
    return formset
def generate_overall_report(request):
    """Build and post a Confluence overall report for visible projects.

    For each project flagged ``show``, aggregates per-test average/median
    response times (visible tests only) and appends a graph section, then
    posts the assembled content to Confluence and returns its response.
    """
    content = ""
    # Filter in the database instead of fetching every project and
    # discarding the hidden ones in Python.
    for project in Project.objects.filter(show=True).values():
        project_id = int(project['id'])
        # NOTE(review): the [10:] slice SKIPS the first 10 tests (by start
        # time) — confirm this is intended rather than [:10].
        aggregates = Aggregate.objects.annotate(
            test_name=F('test__display_name')) \
            .filter(test__project__id=project_id, test__show=True) \
            .values('test_name') \
            .annotate(Average=Avg('average')) \
            .annotate(Median=Avg('median')) \
            .order_by('test__start_time')[10:]
        content += generate_confluence_graph(project, list(aggregates))
    response = post_to_confluence(content)
    return JsonResponse(response, safe=False)
def get_queryset(self):
    """Rank courses for a search query: exact code/title, full-text, fuzzy."""
    query = self.request.query_params.get('query')
    if query is None:
        return Course.objects.none()
    # 1 when the course code or title matches exactly, else 0.
    exact_rank = Case(
        When(course_code__iexact=query, then=Value(1)),
        When(title__iexact=query, then=Value(1)),
        default=Value(0),
        output_field=IntegerField(),
    )
    full_text_rank = SearchRank(F('search_vector'), SearchQuery(query))
    fuzzy_rank = TrigramSimilarity('course_code', query)
    return (
        Course.objects
        .annotate(exact_rank=exact_rank,
                  full_text_rank=full_text_rank,
                  fuzzy_rank=fuzzy_rank)
        # Keep anything that passes at least one relevance gate.
        .filter(Q(exact_rank=1) |
                Q(full_text_rank__gte=0.1) |
                Q(fuzzy_rank__gte=0.3))
        .order_by('-exact_rank', '-full_text_rank', '-fuzzy_rank',
                  'course_code')
    )
def step03(request, previewMode=False):
    """Phase-3 game view: serve image rounds (GET) and record results (POST).

    POST: adds the submitted result to the count of the feature for the
    current round; returns 201 while rounds remain, else the feedback page.
    GET: picks up to 21 images for the player; when all images have been
    played, redirects everyone to the waiting/over page.
    """
    url_list = []
    assignmentId = request.GET.get('assignmentId')
    # Features with is_bias > -3, in a stable order shared by GET and POST.
    feature = Feature.objects.filter(is_bias__gt=-3).order_by('feature')
    feature_list = list(feature.values_list('feature', flat=True))
    # NOTE(review): debug print left in — consider logger.debug instead.
    print(feature_list)
    if request.method == 'POST':
        result = int(request.POST.get('data'))
        round = int(request.POST.get('round'))
        # Accumulate the player's result onto this round's feature (atomic).
        Feature.objects.filter(feature=feature_list[round]).update(
            count=F('count') + result)
        print("round:", round, " feature:", feature_list[round],
              " post result:", result, len(feature_list) - 1)
        if round < len(feature_list) - 1:
            # More rounds to play: client keeps going.
            return HttpResponse(status=201)
        else:
            return render(request, 'feedback.html')
    else:
        # Get rounds played in total and by the current player
        rounds, roundsnum = popGetList(ImageModel.objects.filter(
            img__startswith=KEYRING).values_list('id', flat=True),
            count=21, phase=3)
        if len(rounds.post) >= ImageModel.objects.filter(
                img__startswith=KEYRING).count():
            # push all to waiting page
            return over(request, 'step03')
        # Single image that will be sent to front-end, will expire in
        # 300 seconds (temporary); sending 21 images at a time.
        data = [i.img.url for i in ImageModel.objects.filter(id__in=roundsnum)]
        # Pad to exactly 21 slots so the front-end layout stays fixed.
        data.extend([None] * (21 - len(data)))
        instructions = Phase03_instruction.get_queryset(
            Phase03_instruction) or ['none']
        return render(
            request, 'step03.html', {
                'feature': feature_list,
                'image_url': data,
                'roundnum': len(feature_list),
                'previewMode': previewMode,
                'instructions': instructions
            })
def test_alias_as_subquery(self):
    """CTEColumnRef.relabeled_clone: a CTE queryset used as a subquery.

    Builds a recursive KeyPair CTE, correlates it against a second CTE via
    Exists, then uses the result inside ``parent__in`` — and finally
    re-iterates the original queryset to ensure it was not mutated by the
    subquery use. (Leftover debug prints removed.)
    """
    def make_regions_cte(cte):
        return KeyPair.objects.filter(
            parent__key="level 1",
        ).annotate(
            rank=F('value'),
        ).union(
            cte.join(
                KeyPair.objects.all().order_by(),
                parent_id=cte.col.id,
            ).annotate(
                rank=F('value'),
            ),
            all=True,
        )
    cte = With.recursive(make_regions_cte)
    children = cte.queryset().with_cte(cte)
    xdups = With(cte.queryset().filter(
        parent__key="level 1",
    ).annotate(
        rank=F('value')
    ).values('id', 'rank'), name='xdups')
    children = children.annotate(
        _exclude=Exists(
            xdups.queryset().filter(
                id=OuterRef("id"),
                rank=OuterRef("rank"),
            )
        )
    ).filter(_exclude=True).with_cte(xdups)
    query = KeyPair.objects.filter(parent__in=children)
    self.assertEqual(query.get().key, 'level 3')
    # Tests the case in which children's query was modified since it was
    # used in a subquery to define `query` above.
    self.assertEqual(
        list(c.key for c in children),
        ['level 2', 'level 2']
    )
def get_select(request):
    """Manage the current user's selected collections (DataTables backend).

    GET  -> JSON rows of the user's selection, with related collection
            fields flattened onto each row (DT_RowId is the row key the
            DataTables client expects).
    POST -> editor-style payload of ``data[<idx>][<field>]`` values plus an
            ``action`` of 'remove' or 'add'; responds with per-row feedback
            and the resulting selection count.
    """
    user = request.user
    if request.method == 'GET':
        response_query = UserSelected.objects.filter(
            select_user=user).annotate(
            DT_RowId=F('pk')).select_related('select_collection')
        # Flatten collection attributes onto each row for the client table.
        response_query = response_query.annotate(
            id_ins=F('select_collection__id_ins'))
        response_query = response_query.annotate(
            id_collectie=F('select_collection__id_collectie'))
        response_query = response_query.annotate(
            first_date=F('select_collection__first_date'))
        response_query = response_query.annotate(
            id_storidge=F('select_collection__id_storidge'))
        response_query = response_query.annotate(
            colonynumber=F('select_collection__colonynumber'))
        response_query = response_query.annotate(
            pathogen__given_name=F('select_collection__pathogen__given_name'))
        return JsonResponse(
            {'data': [entry for entry in response_query.values()]})
    else:  # request.method == 'POST':
        command = request.POST
        data = {}
        action = ''
        # Parse keys shaped like data[<row>][<field>] into a nested dict
        # and pick up the requested action.
        for key, value in command.items():
            split_key = [x.strip(']') for x in key.split('[')]
            if split_key[0] == 'data':
                data.setdefault(split_key[1], {})[split_key[2]] = value
            elif split_key[0] == 'action':
                action = value
        # Apply the requested action to each submitted row.
        response_json = {}
        if action == 'remove':
            for key, value in data.items():
                UserSelected.objects.get(pk=value['id']).delete()
        elif action == 'add':
            for key, value in data.items():
                isolate = Collection.objects.get(pk=value['collection_id'])
                if not UserSelected.objects.filter(
                        select_user=user,
                        select_collection=isolate).exists():
                    UserSelected.objects.create(select_user=user,
                                                select_collection=isolate)
                else:
                    # Response key spelling kept as-is ('alreaddy_selected'):
                    # clients depend on it.
                    response_json.setdefault('alreaddy_selected',
                                             []).append(value['collection_id'])
        response_json['selected_count'] = UserSelected.objects.filter(
            select_user=user).count()
        return JsonResponse(response_json)
def test_filter_inter_attribute(self):
    """filter() can compare two fields of the same row through F().

    Finds companies with more employees than chairs.
    """
    understaffed = self.company_query.filter(num_employees__gt=F("num_chairs"))
    expected = [
        {"num_chairs": 5, "name": "Example Inc.", "num_employees": 2300},
        {"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
    ]
    self.assertQuerysetEqual(understaffed, expected, lambda o: o)
def test_update(self):
    """update() can copy one field's value into another via F().

    Gives every company exactly as many chairs as employees.
    """
    self.company_query.update(num_chairs=F("num_employees"))
    expected = [
        {"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
        {"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
        {"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
    ]
    self.assertQuerysetEqual(self.company_query, expected, lambda o: o)
def test_cte_queryset(self):
    """A CTE built from a values() queryset can feed a correlated Subquery."""
    sub_totals = With(
        Order.objects.values(
            region_parent=F("region__parent_id")
        ).annotate(total=Sum("amount")),
    )
    # Correlate each region with the summed amount of its child regions.
    child_total = Subquery(
        sub_totals.queryset()
        .filter(region_parent=OuterRef("name"))
        .values("total")
    )
    regions = (
        Region.objects.all()
        .with_cte(sub_totals)
        .annotate(child_regions_total=child_total)
        .order_by("name")
    )
    observed = [(r.name, r.child_regions_total) for r in regions]
    self.assertEqual(observed, [
        ("bernard's star", None),
        ('deimos', None),
        ('earth', 6),
        ('mars', None),
        ('mercury', None),
        ('moon', None),
        ('phobos', None),
        ('proxima centauri', 33),
        ('proxima centauri b', None),
        ('sun', 368),
        ('venus', None),
    ])
def get_queryset(self):
    """Return public compendium results, optionally only the latest versions.

    When the ``latest_version`` query parameter is truthy, each result is
    kept only if its version equals the newest version within its
    (primary_organism, quant_sf_only) group.
    """
    public_results = CompendiumResult.objects.filter(result__is_public=True)
    wants_latest = self.request.query_params.get("latest_version", False)
    if not wants_latest:
        return public_results
    # Correlate on organism + quant_sf_only to find the newest version
    # within each group. (Distinct local names avoid the original's
    # shadowing of `latest_version`.)
    version_filter = Q(
        primary_organism=OuterRef("primary_organism"),
        quant_sf_only=OuterRef("quant_sf_only"),
    )
    newest_versions = (
        public_results.filter(version_filter)
        .order_by("-compendium_version")
        .values("compendium_version")
    )
    return public_results.annotate(
        latest_version=Subquery(newest_versions[:1])
    ).filter(compendium_version=F("latest_version"))
def get_queryset(self):
    """Movies ranked by comment count, optionally limited to a date range.

    Movies are annotated with ``total_comments`` and a dense ``rank``
    window over that count. When ``date_start``/``date_end`` query
    parameters are supplied, only movies with comments published inside
    the range are returned.

    BUG FIX: the original returned ``movies_sort`` unconditionally before
    the query-parameter branch, making the date filtering unreachable
    dead code; the branch now executes.
    """
    dense_rank = Window(expression=DenseRank(),
                        order_by=F('total_comments').desc())
    movies_sort = (
        Movie.objects.all()
        .annotate(total_comments=Count('Comments'))
        .order_by('-total_comments')
        .annotate(rank=dense_rank)
    )
    if self.request.query_params:
        date_start = self.request.query_params.get('date_start')
        date_end = self.request.query_params.get('date_end')
        return movies_sort.filter(
            Comments__pub_date__gte=date_start,
            Comments__pub_date__lte=date_end)
    return movies_sort
def vote(request, question_id):
    """Record a vote for the selected choice of a poll question."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form with an error message.
        context = {
            'question': question,
            'error_message': "You didn't select a choice",
        }
        return render(request, 'polls/detail.html', context)
    # F() makes the increment race-free at the database level.
    selected_choice.votes = F('votes') + 1
    selected_choice.save()
    # Always return an HttpResponseRedirect after successfully dealing
    # with POST data. This prevents data from being posted twice if a
    # user hits the Back button.
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))