def image_report_list(request):
    imagesets = ImageSet.objects.all()
    imagesets_data = []
    for imageset in imagesets:
        images_data = []
        for image in imageset.images.all():
            # Migration hack: Image.filter cannot be auto populated, so
            # ignore images that have not been migrated to filters for now.
            if image.filter:
                filter_data = image.filter.as_data()
                image_data = {
                    'name': image.name,
                    'bundle_count': evaluate_filter(
                        request.user, filter_data).count(),
                    'link': image.name,
                }
                images_data.append(image_data)
        images_data.sort(key=lambda d: d['name'])
        imageset_data = {
            'name': imageset.name,
            'images': images_data,
        }
        imagesets_data.append(imageset_data)
    imagesets_data.sort(key=lambda d: d['name'])
    return render_to_response(
        "dashboard_app/image-reports.html", {
            'bread_crumb_trail': BreadCrumbTrail.leading_to(image_report_list),
            'imagesets': imagesets_data,
        }, RequestContext(request))
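# For reference, a sketch of the ``imagesets`` context value the view above
# builds for dashboard_app/image-reports.html (the names and counts below
# are invented sample data, not part of the module):
#
#     [{'name': 'linaro-image-set',
#       'images': [{'name': 'panda-nano', 'bundle_count': 42,
#                   'link': 'panda-nano'}]}]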
def compare_filter_matches(request, filter_data, tag1, tag2):
    matches = evaluate_filter(request.user, filter_data)
    match1, match2 = matches.with_tags(tag1, tag2)
    test_cases_for_test_id = {}
    for test in filter_data['tests']:
        test_cases = test['test_cases']
        if test_cases:
            test_cases = set([tc.test_case_id for tc in test_cases])
        else:
            test_cases = None
        test_cases_for_test_id[test['test'].test_id] = test_cases
    test_run_info = []

    def key(tr):
        return tr.test.test_id
    for key, tr1, tr2 in _iter_matching(
            match1.test_runs, match2.test_runs, key):
        if tr1 is None:
            table = None
            only = 'right'
            tr = tr2
            tag = tag2
            cases = None
        elif tr2 is None:
            table = None
            only = 'left'
            tr = tr1
            tag = tag1
            cases = None
        else:
            only = None
            tr = None
            tag = None
            cases = test_cases_for_test_id.get(key)
            test_result_differences = _test_run_difference(tr1, tr2, cases)
            if test_result_differences:
                table = TestResultDifferenceTable(
                    test_result_differences, prefix=key)
                table.base_columns['first_result'].verbose_name = mark_safe(
                    '<a href="%s">build %s: %s</a>' % (
                        tr1.get_absolute_url(), escape(tag1), escape(key)))
                table.base_columns['second_result'].verbose_name = mark_safe(
                    '<a href="%s">build %s: %s</a>' % (
                        tr2.get_absolute_url(), escape(tag2), escape(key)))
                RequestConfig(request, paginate={
                    "per_page": table.length}).configure(table)
            else:
                table = None
            if cases:
                cases = sorted(cases)
                if len(cases) > 1:
                    cases = ', '.join(cases[:-1]) + ' or ' + cases[-1]
                else:
                    cases = cases[0]
        test_run_info.append(dict(
            only=only, key=key, table=table, tr=tr, tag=tag, cases=cases))
    return test_run_info
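# ``_iter_matching`` is called above but not defined in this section.  A
# minimal sketch of the contract implied by the call site -- walk two
# key-sorted sequences in step, yielding ``(key, left, right)`` with None
# on whichever side has no run for that key.  This illustrates the assumed
# behaviour only; it is not the original implementation:
def _iter_matching_sketch(seq1, seq2, key):
    items1 = dict((key(x), x) for x in seq1)
    items2 = dict((key(x), x) for x in seq2)
    for k in sorted(set(items1) | set(items2)):
        yield k, items1.get(k), items2.get(k)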
def get_filter_results(self, filter_name, count=10, offset=0):
    """
    Name
    ----
    ::

        get_filter_results(filter_name, count=10, offset=0)

    Deprecated
    ----------
    This function will cease to operate when V1 is disabled.

    Description
    -----------
    Return information about the test runs and results that a given
    filter matches.

    Arguments
    ---------
    ``filter_name``:
        The name of a filter in the format ~owner/name.
    ``count``:
        The maximum number of matches to return.
    ``offset``:
        Skip over this many results.

    Return value
    ------------
    A list of "filter matches".  A filter match describes the results of
    matching a filter against one or more test runs::

        {
            'tag': either a stringified date (bundle__uploaded_on) or a
                   build number,
            'test_runs': [{
                'test_id': test_id,
                'link': link-to-test-run,
                'passes': int,
                'fails': int,
                'skips': int,
                'total': int,
                # Only present if the filter specifies cases for this test:
                'specific_results': [{
                    'test_case_id': test_case_id,
                    'link': link-to-test-result,
                    'result': pass/fail/skip/unknown,
                    'measurement': string-containing-decimal-or-None,
                    'units': units,
                }],
            }],
            # Only present if the filter does not specify tests:
            'pass_count': int,
            'fail_count': int,
        }
    """
    filter_data = self._get_filter_data(filter_name)
    matches = evaluate_filter(self.user, filter_data, descending=False)
    matches = matches[offset:offset + count]
    return [match.serializable() for match in matches]
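# Example client-side call over XML-RPC, following the ``server.dashboard``
# proxy convention used in the get_filter_results_since docstring below
# (the URL and filter name are placeholder assumptions):
#
#     import xmlrpclib
#     server = xmlrpclib.ServerProxy("http://localhost/RPC2/")
#     for match in server.dashboard.get_filter_results("~owner/name", 10, 0):
#         print match['tag'], len(match['test_runs'])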
def is_pass_table(self):
    if not self.match_maker:
        self.match_maker = evaluate_filter(
            self.request.user, self.filter_object.as_data())
    # Filters that name specific tests get the per-test pass/fail table;
    # otherwise fall back to the summary table.
    if self.match_maker.filter_data['tests']:
        self.table_class = FilterPassTable
        return True
    self.table_class = FilterSummaryTable
    return False
def image_report_detail(request, name):
    image = Image.objects.get(name=name)
    filter_data = image.filter.as_data()
    matches = evaluate_filter(
        request.user, filter_data,
        prefetch_related=['bug_links', 'test_results'])[:50]
    build_number_to_cols = {}
    test_run_names = set()
    for match in matches:
        for test_run in match.test_runs:
            # Note: this rebinds the view's ``name`` argument; ``image.name``
            # is used below where the original value is needed.
            name = test_run.test.test_id
            denorm = test_run.denormalization
            if denorm.count_fail == 0:
                cls = 'present pass'
            else:
                cls = 'present fail'
            bug_links = sorted(
                [b.bug_link for b in test_run.bug_links.all()])
            measurements = [
                {'measurement': str(item.measurement)}
                for item in test_run.test_results.all()]
            test_run_data = dict(
                present=True,
                cls=cls,
                uuid=test_run.analyzer_assigned_uuid,
                passes=denorm.count_pass,
                total=denorm.count_pass + denorm.count_fail,
                link=test_run.get_permalink(),
                bug_links=bug_links,
                measurements=measurements,
            )
            col_key = (match.tag, test_run.bundle.uploaded_on)
            if col_key not in build_number_to_cols:
                build_number_to_cols[col_key] = {
                    'test_runs': {},
                    'number': str(match.tag),
                    'date': str(test_run.bundle.uploaded_on),
                    'link': test_run.bundle.get_absolute_url(),
                }
            build_number_to_cols[col_key]['test_runs'][name] = test_run_data
            if name != 'lava':
                test_run_names.add(name)
    test_run_names = sorted(test_run_names)
    test_run_names.insert(0, 'lava')
    cols = [c for n, c in sorted(build_number_to_cols.items())]
    table_data = {}
    for test_run_name in test_run_names:
        row_data = []
        for col in cols:
            test_run_data = col['test_runs'].get(test_run_name)
            if not test_run_data:
                test_run_data = dict(
                    present=False,
                    cls='missing',
                )
            row_data.append(test_run_data)
        table_data[test_run_name] = row_data
    return render_to_response(
        "dashboard_app/image-report.html", {
            'bread_crumb_trail': BreadCrumbTrail.leading_to(
                image_report_detail, name=image.name),
            'image': image,
            'chart_data': json.dumps(table_data),
            'test_names': json.dumps(test_run_names),
            'columns': json.dumps(cols),
        }, RequestContext(request))
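# For reference, a sketch of the JSON values the view above hands to the
# template (sample data invented for illustration):
#
#     test_names: ["lava", "pwrmgmt"]
#     columns:    [{"number": "42", "date": "2013-05-01 12:00:00",
#                   "link": "/bundles/...", "test_runs": {...}}]
#     chart_data: {"pwrmgmt": [{"present": true, "cls": "present pass",
#                               "passes": 10, "total": 12, ...},
#                              {"present": false, "cls": "missing"}]}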
def get_filter_results_since(self, filter_name, since=None):
    """
    Name
    ----
    ::

        get_filter_results_since(filter_name, since=None)

    Description
    -----------
    Return information about the test runs and results that a given
    filter matches that are more recent than a previous match -- in more
    detail, results where the ``tag`` is greater than the value passed in
    ``since``.

    The idea of this method is that it will be called from a cron job to
    update previously accessed results.  Something like this::

        previous_results = json.load(open('results.json'))
        results = previous_results + server.dashboard.get_filter_results_since(
            filter_name, previous_results[-1]['tag'])
        ... do things with results ...
        json.dump(results, open('results.json', 'w'))

    If called without passing ``since`` (or with ``since`` set to
    ``None``), this method returns up to 100 matches from the filter.  In
    fact, the matches are always capped at 100 -- so set your cron job to
    execute frequently enough that fewer than 100 matches are generated
    between calls!

    Arguments
    ---------
    ``filter_name``:
        The name of a filter in the format ~owner/name.
    ``since``:
        The 'tag' of the most recent result that was retrieved from this
        filter.

    Return value
    ------------
    A list of "filter matches".  A filter match describes the results of
    matching a filter against one or more test runs::

        {
            'tag': either a stringified date (bundle__uploaded_on) or a
                   build number,
            'test_runs': [{
                'test_id': test_id,
                'link': link-to-test-run,
                'passes': int,
                'fails': int,
                'skips': int,
                'total': int,
                # Only present if the filter specifies cases for this test:
                'specific_results': [{
                    'test_case_id': test_case_id,
                    'link': link-to-test-result,
                    'result': pass/fail/skip/unknown,
                    'measurement': string-containing-decimal-or-None,
                    'units': units,
                }],
            }],
            # Only present if the filter does not specify tests:
            'pass_count': int,
            'fail_count': int,
        }
    """
    filter_data = self._get_filter_data(filter_name)
    matches = evaluate_filter(self.user, filter_data, descending=False)
    if since is not None:
        # Tags are stringified datetimes unless the filter uses a build
        # number attribute, so only parse ``since`` as a datetime in the
        # former case.
        if filter_data.get('build_number_attribute') is None:
            try:
                since = datetime.datetime.strptime(
                    since, "%Y-%m-%d %H:%M:%S.%f")
            except ValueError:
                raise xmlrpclib.Fault(
                    errors.BAD_REQUEST,
                    "cannot parse since argument as datetime")
        matches = matches.since(since)
    matches = matches[:100]
    return [match.serializable() for match in matches]
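# Note on the ``since`` format: for a filter without a
# ``build_number_attribute`` the tags are stringified datetimes, so
# ``since`` must parse with "%Y-%m-%d %H:%M:%S.%f", e.g.
# "2013-05-01 12:00:00.000000"; for a build-number filter, pass the
# build number itself.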
def pmqa_view(request):
    test = Test.objects.get(test_id='pwrmgmt')
    device_types_with_results = []
    prefix__device_type_result = {}
    # Imported locally, presumably to avoid a circular import between apps.
    from lava_scheduler_app.models import DeviceType
    device_types = list(
        DeviceType.objects.filter(display=True).values_list('name', flat=True))
    bundle_streams = [
        pmqabs.bundle_stream for pmqabs in PMQABundleStream.objects.all()]
    bundle_streams.sort(key=lambda bs: bs.pathname)
    for bs in bundle_streams:
        c = len(device_types_with_results)
        for device_type in device_types:
            if device_type.startswith('rtsm'):
                continue
            filter_data = {
                'bundle_streams': [bs],
                'attributes': [('target.device_type', device_type)],
                'tests': [{'test': test, 'test_cases': []}],
                'build_number_attribute': 'build.id',
            }
            matches = list(evaluate_filter(request.user, filter_data)[:50])
            if matches:
                match = matches[0]
                m0 = match.serializable(include_links=False)
                del m0['tag']
                last_difference = None
                for m in matches[1:]:
                    m1 = m.serializable(include_links=False)
                    del m1['tag']
                    if m1 != m0:
                        last_difference = (
                            m.tag,
                            reverse(compare_pmqa_results, kwargs={
                                'pathname': bs.pathname,
                                'device_type': device_type,
                                'build1': str(m.tag),
                                'build2': str(match.tag),
                            }))
                        break
                tr = match.test_runs[0]
                device_types_with_results.append({
                    'sn': bs.slug,
                    'device_type': device_type,
                    'date': tr.bundle.uploaded_on,
                    'build': match.tag,
                    'link': tr.get_absolute_url(),
                    'width': 0,
                    'last_difference': last_difference,
                    'filter_link': reverse(pmqa_filter_view, kwargs=dict(
                        pathname=bs.pathname,
                        device_type=device_type)),
                })
                for result in tr.test_results.all().select_related('test_case'):
                    prefix = result.test_case.test_case_id.split('.')[0]
                    device_type__result = prefix__device_type_result.setdefault(
                        prefix, {})
                    d = device_type__result.setdefault(
                        device_type, {'pass': 0, 'total': 0, 'present': True})
                    if result.result == result.RESULT_PASS:
                        d['pass'] += 1
                    d['total'] += 1
        if len(device_types_with_results) > c:
            device_types_with_results[c]['width'] = \
                len(device_types_with_results) - c
    results = []
    prefixes = sorted(prefix__device_type_result)
    for prefix in prefixes:
        board_results = []
        for d in device_types_with_results:
            cell_data = prefix__device_type_result[prefix].get(d['device_type'])
            if cell_data is not None:
                if cell_data['total'] == cell_data['pass']:
                    cell_data['css_class'] = 'pass'
                else:
                    cell_data['css_class'] = 'fail'
            else:
                cell_data = {
                    'css_class': 'missing',
                    'present': False,
                }
            board_results.append(cell_data)
        results.append((prefix, board_results))
    return render_to_response(
        "dashboard_app/pmqa-view.html", {
            'bread_crumb_trail': BreadCrumbTrail.leading_to(pmqa_view),
            'device_types_with_results': device_types_with_results,
            'results': results,
        }, RequestContext(request))
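# The grouping above keys PMQA results on the first dotted component of
# the test case id, e.g. (hypothetical case id):
#
#     >>> 'suspend_resume.0'.split('.')[0]
#     'suspend_resume'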