def get_queryset(self):
    """Return BugJobMap rows for one bug in the requested tree/date
    range, each annotated with a human-readable ``build_type``.

    Reads ``startday``, ``endday``, ``tree`` and ``bug`` from the
    request's query params; raises if ``endday``/``bug`` are missing.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = int(self.request.query_params.get('bug'))

    queryset = (BugJobMap.failures.default(repo, startday, endday)
                .by_bug(bug_id)
                .values('job__repository__name',
                        'job__machine_platform__platform',
                        'bug_id',
                        'job_id',
                        'job__push__time',
                        'job__push__revision',
                        'job__signature__job_type_name',
                        'job__option_collection_hash',
                        'job__machine__name')
                .order_by('-job__push__time'))

    # Collect the distinct option-collection hashes.  The previous code
    # tested ``if not filter(...)``, which is always False on Python 3
    # (a filter object is truthy even when empty), so no hash was ever
    # collected and every row fell back to build_type 'unknown'.
    hashes = {item['job__option_collection_hash'] for item in queryset}

    hash_query = (OptionCollection.objects.filter(option_collection_hash__in=hashes)
                  .select_related('option')
                  .values('option__name', 'option_collection_hash'))

    # Map each hash to its first matching option name (mirrors the old
    # first-match ``match[0]`` lookup) for O(1) access below.
    build_type_by_hash = {}
    for row in hash_query:
        build_type_by_hash.setdefault(row['option_collection_hash'], row['option__name'])

    for item in queryset:
        item['build_type'] = build_type_by_hash.get(
            item['job__option_collection_hash'], 'unknown')

    return queryset
def get_queryset(self):
    """Return BugJobMap rows for one bug in the requested tree/date
    range, each annotated with a ``build_type`` derived from the job's
    option-collection hash.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = int(self.request.query_params.get('bug'))

    queryset = BugJobMap.objects.filter(
        bug_id=bug_id,
        job__repository_id__in=repo,
        job__push__time__range=(startday, endday),
    ).select_related('job', 'push').values(
        'bug_id',
        'job_id',
        'job__push__time',
        'job__machine_platform__platform',
        'job__repository__name',
        'job__push__revision',
        'job__signature__job_type_name',
        'job__option_collection_hash',
    ).order_by('-job__push__time')

    # ``len(filter(...))`` raises TypeError on Python 3 because filter
    # returns an iterator; a set gives the same dedup without breaking.
    hashes = {item['job__option_collection_hash'] for item in queryset}

    hash_query = OptionCollection.objects.filter(
        option_collection_hash__in=hashes,
    ).select_related('option').values('option__name', 'option_collection_hash')

    # hash -> first matching option name, mirroring the old first-match
    # ``match[0]`` behaviour.
    build_types = {}
    for row in hash_query:
        build_types.setdefault(row['option_collection_hash'], row['option__name'])

    for item in queryset:
        item['build_type'] = build_types.get(
            item['job__option_collection_hash'], 'unknown')

    return queryset
def get_queryset(self):
    """Return BugJobMap rows for one bug in the requested tree/date
    range, annotated with a ``build_type`` per row.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = int(self.request.query_params.get('bug'))

    queryset = (BugJobMap.failures.default(repo, startday, endday)
                .by_bug(bug_id)
                .values('job__repository__name',
                        'job__machine_platform__platform',
                        'bug_id',
                        'job_id',
                        'job__push__time',
                        'job__push__revision',
                        'job__signature__job_type_name',
                        'job__option_collection_hash')
                .order_by('-job__push__time'))

    # The old dedup used ``if not filter(...)`` which is always False on
    # Python 3 (filter objects are truthy), so the hash list stayed
    # empty and every build_type ended up 'unknown'.  A set fixes both
    # the bug and the O(n^2) membership scan.
    hashes = {item['job__option_collection_hash'] for item in queryset}

    hash_query = (OptionCollection.objects.filter(option_collection_hash__in=hashes)
                  .select_related('option')
                  .values('option__name', 'option_collection_hash'))

    # hash -> first matching option name (same semantics as the old
    # ``match[0]`` first-match lookup).
    build_type_by_hash = {}
    for row in hash_query:
        build_type_by_hash.setdefault(row['option_collection_hash'], row['option__name'])

    for item in queryset:
        item['build_type'] = build_type_by_hash.get(
            item['job__option_collection_hash'], 'unknown')

    return queryset
def get_test_runs(self, startday, endday):
    """Return the total number of pushes (counted via their authors)
    across all repositories in the given date range."""
    push_counts = Push.objects.filter(
        repository_id__in=get_repository('all'),
        time__range=(startday, endday),
    ).aggregate(Count('author'))
    return push_counts['author__count']
def list(self, request):
    """Return a paginated list of failures for one bug, each row
    annotated with its unexpected-fail log lines and its build type.

    Validates query params with FailuresQueryParamsSerializer and
    returns HTTP 400 on invalid input.
    """
    query_params = FailuresQueryParamsSerializer(data=request.query_params,
                                                 context='requireBug')
    if not query_params.is_valid():
        return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)

    startday = query_params.validated_data['startday']
    endday = get_end_of_day(query_params.validated_data['endday'])
    repo = list(get_repository(query_params.validated_data['tree']))
    bug_id = query_params.validated_data['bug']

    self.queryset = (BugJobMap.failures.default(repo, startday, endday)
                     .by_bug(bug_id)
                     .values('job__repository__name',
                             'job__machine_platform__platform',
                             'bug_id',
                             'job_id',
                             'job__push__time',
                             'job__push__revision',
                             'job__signature__job_type_name',
                             'job__option_collection_hash',
                             'job__machine__name')
                     .order_by('-job__push__time'))

    lines = (TextLogError.objects.filter(
        step__job_id__in=self.queryset.values_list('job_id', flat=True),
        line__contains='TEST-UNEXPECTED-FAIL').values_list('step__job_id', 'line'))

    grouped_lines = defaultdict(list)
    for job_id, line in lines:
        if line is not None:
            grouped_lines[job_id].append(line)

    # Attach log lines and collect distinct hashes.  The previous code
    # tested ``if not filter(...)``, which is always False on Python 3
    # (filter objects are truthy), so no hash was ever collected and
    # build_type was always 'unknown'.
    hashes = set()
    for item in self.queryset:
        item['lines'] = grouped_lines.get(item['job_id'], [])
        hashes.add(item['job__option_collection_hash'])

    hash_query = (OptionCollection.objects.filter(option_collection_hash__in=hashes)
                  .select_related('option')
                  .values('option__name', 'option_collection_hash'))

    # hash -> first matching option name (keeps the old first-match
    # semantics) for O(1) lookup.
    build_types = {}
    for row in hash_query:
        build_types.setdefault(row['option_collection_hash'], row['option__name'])

    for item in self.queryset:
        item['build_type'] = build_types.get(
            item['job__option_collection_hash'], 'unknown')

    serializer = self.get_serializer(self.paginate_queryset(self.queryset),
                                     many=True)
    return self.get_paginated_response(serializer.data)
def get_queryset(self):
    """Return per-bug failure counts for the requested tree and date
    range, ordered by descending count."""
    params = self.request.query_params
    startday = params.get('startday')
    endday = get_end_of_day(params.get('endday').encode('utf-8'))
    repo = list(get_repository(params.get('tree')))

    return (BugJobMap.failures.default(repo, startday, endday)
            .values('bug_id')
            .annotate(bug_count=Count('job_id'))
            .values('bug_id', 'bug_count')
            .order_by('-bug_count'))
def get_queryset(self):
    """Count intermittent failures (failure classification id 4) per
    bug for the requested tree and date range, most frequent first."""
    params = self.request.query_params
    startday = params.get('startday')
    endday = get_end_of_day(params.get('endday').encode('utf-8'))
    repositories = list(get_repository(params.get('tree')))

    failures = BugJobMap.objects.filter(
        job__repository_id__in=repositories,
        job__push__time__range=(startday, endday),
        job__failure_classification__id=4,
    ).select_related('push')
    return (failures.values('bug_id')
            .annotate(bug_count=Count('job_id'))
            .values('bug_id', 'bug_count')
            .order_by('-bug_count'))
def get_queryset(self):
    """Aggregate failures by bug for the given tree and date window,
    returning (bug_id, bug_count) pairs sorted by count descending."""
    query_params = self.request.query_params
    start = query_params.get('startday')
    end = get_end_of_day(query_params.get('endday').encode('utf-8'))
    repositories = list(get_repository(query_params.get('tree')))

    failures = BugJobMap.failures.default(repositories, start, end)
    counted = failures.values('bug_id').annotate(bug_count=Count('job_id'))
    return counted.values('bug_id', 'bug_count').order_by('-bug_count')
def list(self, request):
    """Return per-day counts merging pushes (``test_runs``) with
    intermittent failures (``failure_count``), optionally restricted
    to a single bug.

    Validates query params and returns HTTP 400 on invalid input.
    """
    query_params = FailuresQueryParamsSerializer(data=request.query_params)
    if not query_params.is_valid():
        return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)

    startday = query_params.validated_data['startday']
    endday = get_end_of_day(query_params.validated_data['endday'])
    repo = list(get_repository(query_params.validated_data['tree']))
    bug_id = query_params.validated_data['bug']

    push_query = (Push.objects.filter(
        repository_id__in=repo, time__range=(startday, endday)).annotate(
            date=TruncDate('time')).values('date').annotate(
                test_runs=Count('author')).order_by('date').values(
                    'date', 'test_runs'))

    if bug_id:
        job_query = (BugJobMap.failures.default(
            repo, startday, endday).by_bug(bug_id).annotate(
                date=TruncDate('job__push__time')).values('date').annotate(
                    failure_count=Count('id')).order_by('date').values(
                        'date', 'failure_count'))
    else:
        job_query = (Job.objects.filter(
            push__time__range=(startday, endday),
            repository_id__in=repo,
            failure_classification_id=4).select_related('push').annotate(
                date=TruncDate('push__time')).values('date').annotate(
                    failure_count=Count('id')).order_by('date').values(
                        'date', 'failure_count'))

    # Merge push_query and job_query: if a date appears in both, reuse
    # the job row and add its test_run count; if only in push_query,
    # synthesise an entry with failure_count 0.  Index the job rows by
    # date up front (dates are unique since both queries group by date)
    # instead of rescanning job_query with filter() per push — the old
    # approach was O(pushes * failures).
    failures_by_date = {row['date']: row for row in job_query}

    self.queryset = []
    for push in push_query:
        row = failures_by_date.get(push['date'])
        if row is not None:
            row['test_runs'] = push['test_runs']
            self.queryset.append(row)
        else:
            self.queryset.append({
                'date': push['date'],
                'test_runs': push['test_runs'],
                'failure_count': 0
            })

    serializer = self.get_serializer(self.queryset, many=True)
    return Response(serializer.data)
def get_queryset(self):
    """Per-bug totals of intermittent failures (classification id 4)
    for the requested tree/date range, largest totals first."""
    request_params = self.request.query_params
    start = request_params.get('startday')
    end = get_end_of_day(request_params.get('endday').encode('utf-8'))
    repos = list(get_repository(request_params.get('tree')))

    intermittents = BugJobMap.objects.filter(
        job__repository_id__in=repos,
        job__push__time__range=(start, end),
        job__failure_classification__id=4,
    ).select_related('push')

    return (intermittents
            .values('bug_id')
            .annotate(bug_count=Count('job_id'))
            .values('bug_id', 'bug_count')
            .order_by('-bug_count'))
def get_queryset(self):
    """Build a day-by-day list merging push counts (``test_runs``) with
    intermittent failure counts, optionally limited to one bug."""
    params = self.request.query_params
    startday = params.get('startday')
    endday = get_end_of_day(params.get('endday').encode('utf-8'))
    repo = list(get_repository(params.get('tree')))
    bug_id = params.get('bug')

    push_query = (Push.objects
                  .filter(repository_id__in=repo,
                          time__range=(startday, endday))
                  .annotate(date=TruncDate('time'))
                  .values('date')
                  .annotate(test_runs=Count('author'))
                  .order_by('date')
                  .values('date', 'test_runs'))

    if bug_id:
        job_query = (BugJobMap.objects
                     .filter(job__repository_id__in=repo,
                             job__push__time__range=(startday, endday),
                             job__failure_classification__id=4,
                             bug_id=int(bug_id))
                     .select_related('push')
                     .annotate(date=TruncDate('job__push__time'))
                     .values('date')
                     .annotate(failure_count=Count('id'))
                     .order_by('date')
                     .values('date', 'failure_count'))
    else:
        job_query = (Job.objects
                     .filter(push__time__range=(startday, endday),
                             repository_id__in=repo,
                             failure_classification_id=4)
                     .select_related('push')
                     .annotate(date=TruncDate('push__time'))
                     .values('date')
                     .annotate(failure_count=Count('id'))
                     .order_by('date')
                     .values('date', 'failure_count'))

    # For each push date: reuse the first matching failure row (adding
    # its test_runs count) when one exists, otherwise synthesise a
    # zero-failure entry for that day.
    merged = []
    for push in push_query:
        failure_row = next(
            (row for row in job_query if row['date'] == push['date']), None)
        if failure_row is not None:
            failure_row['test_runs'] = push['test_runs']
            merged.append(failure_row)
        else:
            merged.append({'date': push['date'],
                           'test_runs': push['test_runs'],
                           'failure_count': 0})
    return merged
def get_bug_stats(self, startday, endday):
    """Get all intermittent failures per specified date range and repository,
    returning a dict of bug_id's with total, repository and platform totals
    if totals are greater than or equal to the threshold.
    eg:
    {
        "1206327": {
            "total": 5,
            "per_repository": {
                "fx-team": 2,
                "mozilla-inbound": 3
            },
            "per_platform": {
                "osx-10-10": 4,
                "b2g-emu-ics": 1
            }
        },
        ...
    }
    """
    # Min required failures per bug in order to post a comment
    threshold = 1 if self.weekly_mode else 15

    failures = (BugJobMap.failures.default(
        get_repository('all'), startday, endday)
        .values('job__repository__name',
                'job__machine_platform__platform',
                'bug_id'))

    bug_map = {}
    for failure in failures:
        bug_id = failure['bug_id']
        platform = failure['job__machine_platform__platform']
        repository = failure['job__repository__name']

        entry = bug_map.get(bug_id)
        if entry is None:
            # First sighting of this bug: seed the counters.
            bug_map[bug_id] = {
                'total': 1,
                'per_platform': Counter([platform]),
                'per_repository': Counter([repository]),
            }
        else:
            entry['total'] += 1
            entry['per_platform'][platform] += 1
            entry['per_repository'][repository] += 1

    # Only keep bugs that met the comment threshold.
    return {bug_id: stats
            for bug_id, stats in iteritems(bug_map)
            if stats['total'] >= threshold}
def get_queryset(self):
    """Return BugJobMap rows for one bug in the requested tree/date
    range, each annotated with its TEST-UNEXPECTED-FAIL log lines and
    a human-readable ``build_type``.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = int(self.request.query_params.get('bug'))

    queryset = (BugJobMap.failures.default(repo, startday, endday)
                .by_bug(bug_id)
                .values('job__repository__name',
                        'job__machine_platform__platform',
                        'bug_id',
                        'job_id',
                        'job__push__time',
                        'job__push__revision',
                        'job__signature__job_type_name',
                        'job__option_collection_hash',
                        'job__machine__name')
                .order_by('-job__push__time'))

    lines = (TextLogError.objects.filter(
        step__job_id__in=queryset.values_list('job_id', flat=True),
        line__contains='TEST-UNEXPECTED-FAIL')
        .values_list('step__job_id', 'line'))

    grouped_lines = defaultdict(list)
    for job_id, line in lines:
        if line is not None:
            grouped_lines[job_id].append(line)

    # Attach log lines and collect distinct hashes.  The old dedup used
    # ``if not filter(...)``, which is always False on Python 3 (filter
    # objects are truthy), so the hash list stayed empty and build_type
    # was always 'unknown'.
    hashes = set()
    for item in queryset:
        item['lines'] = grouped_lines.get(item['job_id'], [])
        hashes.add(item['job__option_collection_hash'])

    hash_query = (OptionCollection.objects.filter(option_collection_hash__in=hashes)
                  .select_related('option')
                  .values('option__name', 'option_collection_hash'))

    # hash -> first matching option name (same first-match semantics as
    # the old ``match[0]`` lookup).
    build_types = {}
    for row in hash_query:
        build_types.setdefault(row['option_collection_hash'], row['option__name'])

    for item in queryset:
        item['build_type'] = build_types.get(
            item['job__option_collection_hash'], 'unknown')

    return queryset
def get_queryset(self):
    """Merge daily push counts with daily failure counts for the
    requested tree and date range, optionally scoped to one bug,
    yielding a list of {date, test_runs, failure_count} dicts."""
    query_params = self.request.query_params
    startday = query_params.get('startday')
    endday = get_end_of_day(query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(query_params.get('tree')))
    bug_id = query_params.get('bug')

    push_query = (Push.objects.filter(repository_id__in=repo,
                                      time__range=(startday, endday))
                  .annotate(date=TruncDate('time'))
                  .values('date')
                  .annotate(test_runs=Count('author'))
                  .order_by('date')
                  .values('date', 'test_runs'))

    if bug_id:
        job_query = (BugJobMap.failures.default(repo, startday, endday)
                     .by_bug(bug_id)
                     .annotate(date=TruncDate('job__push__time'))
                     .values('date')
                     .annotate(failure_count=Count('id'))
                     .order_by('date')
                     .values('date', 'failure_count'))
    else:
        job_query = (Job.objects.filter(push__time__range=(startday, endday),
                                        repository_id__in=repo,
                                        failure_classification_id=4)
                     .select_related('push')
                     .annotate(date=TruncDate('push__time'))
                     .values('date')
                     .annotate(failure_count=Count('id'))
                     .order_by('date')
                     .values('date', 'failure_count'))

    # Walk the push days in order; if that day also has a failure row,
    # reuse it (adding test_runs), otherwise record zero failures.
    combined = []
    for push_row in push_query:
        job_row = next(
            (j for j in job_query if j['date'] == push_row['date']), None)
        if job_row is None:
            combined.append({'date': push_row['date'],
                             'test_runs': push_row['test_runs'],
                             'failure_count': 0})
        else:
            job_row['test_runs'] = push_row['test_runs']
            combined.append(job_row)
    return combined
def list(self, request):
    """Return a paginated, descending list of per-bug failure counts
    for the validated tree/date range; HTTP 400 on bad params."""
    query_params = FailuresQueryParamsSerializer(data=request.query_params)
    if not query_params.is_valid():
        return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)

    validated = query_params.validated_data
    startday = validated['startday']
    endday = get_end_of_day(validated['endday'])
    repo = list(get_repository(validated['tree']))

    self.queryset = (BugJobMap.failures.default(repo, startday, endday)
                     .values('bug_id')
                     .annotate(bug_count=Count('job_id'))
                     .values('bug_id', 'bug_count')
                     .order_by('-bug_count'))

    page = self.paginate_queryset(self.queryset)
    serializer = self.get_serializer(page, many=True)
    return self.get_paginated_response(serializer.data)
def get_queryset(self):
    """Return BugJobMap rows for one bug in the requested tree/date
    range, each annotated with a ``build_type`` resolved from the job's
    option-collection hash.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(
        self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = int(self.request.query_params.get('bug'))

    queryset = BugJobMap.objects.filter(
        bug_id=bug_id,
        job__repository_id__in=repo,
        job__push__time__range=(startday, endday)).select_related(
            'job', 'push').values(
                'bug_id',
                'job_id',
                'job__push__time',
                'job__machine_platform__platform',
                'job__repository__name',
                'job__push__revision',
                'job__signature__job_type_name',
                'job__option_collection_hash',
            ).order_by('-job__push__time')

    # ``len(filter(...))`` and ``match[0]`` raise TypeError on Python 3
    # because filter returns an iterator; dedupe with a set instead.
    hashes = {item['job__option_collection_hash'] for item in queryset}

    hash_query = OptionCollection.objects.filter(
        option_collection_hash__in=hashes).select_related(
            'option').values('option__name', 'option_collection_hash')

    # hash -> first matching option name (preserves the old first-match
    # semantics) for O(1) lookup.
    build_types = {}
    for row in hash_query:
        build_types.setdefault(row['option_collection_hash'], row['option__name'])

    for item in queryset:
        item['build_type'] = build_types.get(
            item['job__option_collection_hash'], 'unknown')

    return queryset
def get_queryset(self):
    """Merge per-day push counts with per-day intermittent-failure
    counts for the requested tree/date range (optionally restricted to
    one bug) and return a list of {date, test_runs, failure_count}.
    """
    startday = self.request.query_params.get('startday')
    endday = get_end_of_day(self.request.query_params.get('endday').encode('utf-8'))
    repo = list(get_repository(self.request.query_params.get('tree')))
    bug_id = self.request.query_params.get('bug')

    push_query = (Push.objects.filter(repository_id__in=repo,
                                      time__range=(startday, endday))
                  .annotate(date=TruncDate('time'))
                  .values('date')
                  .annotate(test_runs=Count('author'))
                  .order_by('date')
                  .values('date', 'test_runs'))

    if bug_id:
        job_query = (BugJobMap.objects.filter(
            job__repository_id__in=repo,
            job__push__time__range=(startday, endday),
            job__failure_classification__id=4,
            bug_id=int(bug_id))
            .select_related('push')
            .annotate(date=TruncDate('job__push__time'))
            .values('date')
            .annotate(failure_count=Count('id'))
            .order_by('date')
            .values('date', 'failure_count'))
    else:
        job_query = (Job.objects.filter(
            push__time__range=(startday, endday),
            repository_id__in=repo,
            failure_classification_id=4)
            .select_related('push')
            .annotate(date=TruncDate('push__time'))
            .values('date')
            .annotate(failure_count=Count('id'))
            .order_by('date')
            .values('date', 'failure_count'))

    # merges the push_query and job_query results into a list; if a date
    # is found in both queries, update the job_query row with the
    # test_run count, otherwise add a push-only entry with zero
    # failures.  ``len(filter(...))`` / ``filter(...)[0]`` break on
    # Python 3 (filter yields an iterator), so index the failure rows by
    # date instead — dates are unique because both queries group by date.
    failures_by_date = {row['date']: row for row in job_query}

    queryset = []
    for push in push_query:
        row = failures_by_date.get(push['date'])
        if row is not None:
            row['test_runs'] = push['test_runs']
            queryset.append(row)
        else:
            queryset.append({'date': push['date'],
                             'test_runs': push['test_runs'],
                             'failure_count': 0})
    return queryset