def _MigrateTestLookupPatterns(old_pattern, new_pattern):
  """Enumerates individual test migration tasks and enqueues them.

  Typically, this function is called by a request initiated by the user.
  The purpose of this function is to queue up a set of requests which will
  do all of the actual work.

  Args:
    old_pattern: Test path pattern for old names.
    new_pattern: Test path pattern for new names.

  Raises:
    BadInputPatternError: Something was wrong with the input patterns.
  """
  futures = []
  tests = list_tests.GetTestsMatchingPattern(old_pattern, list_entities=False)
  for test in tests:
    old_test_key = utils.TestKey(test)
    new_test_key = utils.TestKey(
        _ValidateAndGetNewTestPath(old_test_key.id(), new_pattern))
    futures.append(
        _QueueTask({
            'old_test_key': old_test_key.urlsafe(),
            'new_test_key': new_test_key.urlsafe(),
            'status': _MIGRATE_TEST_CREATE
        }))
  for f in futures:
    f.get_result()

def _GetTestPathFromDict(test_path_dict):
  """Gets a list of test paths from a test path dictionary.

  This function looks up series and the corresponding list of selected series.

  Args:
    test_path_dict: Dictionary of test path to list of selected series.

  Returns:
    List of test paths.
  """
  test_paths = []
  for test_path, selected in test_path_dict.iteritems():
    if selected:
      # With an explicit selection, just add all the test_paths listed.
      parent_test_name = test_path.split('/')[-1]
      for selection in selected:
        if selection == parent_test_name:
          # When the element in the selected list is the same as the last part
          # of the test_path, it's meant to mean just the test_path.
          test_paths.append(test_path)
        else:
          # Generally the selected element is intended to be the last part of
          # the test path.
          test_paths.append('%s/%s' % (test_path, selection))
    else:
      # Without an explicit selection, add this test_path and any children.
      test_paths.append(test_path)
      test_paths.extend(list_tests.GetTestsMatchingPattern(
          '%s/*' % test_path, only_with_rows=True))
  return test_paths

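# Hypothetical illustration of the selection rules above (the test paths and
# series names here are made up, not real dashboard data):
#
#   _GetTestPathFromDict({
#       'ChromiumPerf/linux/sunspider/Total': ['Total', 'ref'],
#       'ChromiumPerf/linux/sunspider/3d-cube': [],
#   })
#
# The first entry expands to 'ChromiumPerf/linux/sunspider/Total' (the
# selection equals the last path component) plus
# 'ChromiumPerf/linux/sunspider/Total/ref'. The second entry has no selection,
# so it contributes the path itself plus whatever children
# GetTestsMatchingPattern finds for 'ChromiumPerf/linux/sunspider/3d-cube/*'.
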
def _GetSheriffForBenchmark(self, benchmark, master, benchmarks):
  # TODO(sullivan): There can be multiple sheriffs; implement this.
  if not benchmarks[benchmark]['mon']:
    return None
  monitored_test_path = benchmarks[benchmark]['mon'][0]
  pattern = '%s/*/%s/%s' % (master, benchmark, monitored_test_path)
  monitored_tests = list_tests.GetTestsMatchingPattern(
      pattern, list_entities=True)
  return monitored_tests[0].sheriff

def _FetchMonitoredForSuite(suite):
  """Fetches the set of all monitored tests for a suite across all
  masters/bots.
  """
  suite_pattern = '*/*/%s' % suite
  suites = list_tests.GetTestsMatchingPattern(
      suite_pattern, list_entities=True, use_cache=False)
  monitored = []
  for s in suites:
    monitored.extend(s.monitored)
  monitored = sorted(list(set(_GetTestSubPath(p) for p in monitored)))
  return monitored

def _AddTasksForPattern(pattern):
  """Enumerates individual test deletion tasks and enqueues them.

  Typically, this function is called by a request initiated by the user.
  The purpose of this function is to queue up a set of requests which will
  do all of the actual work.

  Args:
    pattern: Test path pattern for TestMetadatas to delete.

  Raises:
    BadInputPatternError: Something was wrong with the input pattern.
  """
  tests = list_tests.GetTestsMatchingPattern(pattern, list_entities=True)
  for test in tests:
    _AddTaskForTest(test)

def testEdit_RemovePattern(self):
  """Tests removing a pattern from an AnomalyConfig."""
  self.SetCurrentUser('*****@*****.**', is_admin=True)
  anomaly_config_key = anomaly_config.AnomalyConfig(
      id='Test Config',
      config={'a': 10},
      patterns=['*/*/one', '*/*/two']).put()
  master = graph_data.Master(id='TheMaster').put()
  graph_data.Bot(id='TheBot', parent=master).put()
  test_one = graph_data.TestMetadata(
      id='TheMaster/TheBot/one',
      overridden_anomaly_config=anomaly_config_key,
      has_rows=True)
  test_one.UpdateSheriff()
  test_one = test_one.put()
  test_two = graph_data.TestMetadata(
      id='TheMaster/TheBot/two',
      overridden_anomaly_config=anomaly_config_key,
      has_rows=True)
  test_two.UpdateSheriff()
  test_two = test_two.put()

  # Verify the state of the data before making the request.
  self.assertEqual(['*/*/one', '*/*/two'], anomaly_config_key.get().patterns)
  self.assertEqual(
      ['TheMaster/TheBot/one'],
      list_tests.GetTestsMatchingPattern('*/*/one'))

  self.testapp.post('/edit_anomaly_configs', {
      'add-edit': 'edit',
      'edit-name': 'Test Config',
      'config': '{"a": 10}',
      'patterns': ['*/*/two'],
      'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
  })
  self.ExecuteDeferredTasks('default')
  self.ExecuteTaskQueueTasks(
      '/put_entities_task', edit_config_handler._TASK_QUEUE_NAME)

  self.assertEqual(['*/*/two'], anomaly_config_key.get().patterns)
  self.assertIsNone(test_one.get().overridden_anomaly_config)
  self.assertEqual(
      'Test Config',
      test_two.get().overridden_anomaly_config.string_id())

def _UpgradeChart(chart):
  groups = []
  if isinstance(chart, list):
    groups = chart
  elif isinstance(chart, dict):
    groups = chart['seriesGroups']

  suites = set()
  measurements = set()
  bots = set()
  cases = set()

  for prefix, suffixes in groups:
    if suffixes == ['all']:
      paths = list_tests.GetTestsMatchingPattern(
          prefix + '/*', only_with_rows=True)
    else:
      paths = []
      for suffix in suffixes:
        if suffix == prefix.split('/')[-1]:
          paths.append(prefix)
        else:
          paths.append(prefix + '/' + suffix)

    for path in paths:
      desc = descriptor.Descriptor.FromTestPathSync(path)
      suites.add(desc.test_suite)
      bots.add(desc.bot)
      measurements.add(desc.measurement)
      if desc.test_case:
        cases.add(desc.test_case)

  return {
      'parameters': {
          'testSuites': list(suites),
          'measurements': list(measurements),
          'bots': list(bots),
          'testCases': list(cases),
      },
  }

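# A rough sketch of the two legacy chart shapes _UpgradeChart accepts (the
# test paths below are hypothetical):
#
#   # Plain list of (prefix, suffixes) groups:
#   _UpgradeChart([['ChromiumPerf/linux/sunspider/Total', ['Total', 'ref']]])
#
#   # Dict form with a 'seriesGroups' key; ['all'] expands via
#   # GetTestsMatchingPattern('ChromiumPerf/linux/sunspider/*'):
#   _UpgradeChart({'seriesGroups': [
#       ['ChromiumPerf/linux/sunspider', ['all']]]})
#
# Each resolved path is parsed by descriptor.Descriptor into test suite, bot,
# measurement, and optional test case, which populate the returned
# 'parameters' dict.
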
def _GetUnselectedTestPathFromDict(test_path_dict):
  """Gets a list of test paths for unselected series in a test path dictionary.

  This function looks up series that are directly under the provided test path
  and are not in the list of selected series.

  Args:
    test_path_dict: Dictionary of test path to list of selected sub-series.

  Returns:
    List of test paths.
  """
  test_paths = []
  for test_path, selected in test_path_dict.iteritems():
    parent = test_path.split('/')[-1]
    if parent not in selected:
      test_paths.append(test_path)
    children = list_tests.GetTestsMatchingPattern(
        '%s/*' % test_path, only_with_rows=True)
    for child in children:
      selection = child.split('/')[-1]
      if selection not in selected:
        test_paths.append(child)
  return test_paths

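# Hypothetical counterpart to the _GetTestPathFromDict example: only series
# that are *not* in the selection are returned (paths are made up):
#
#   _GetUnselectedTestPathFromDict({
#       'ChromiumPerf/linux/sunspider/Total': ['ref'],
#   })
#
# 'Total' is not in the selection, so the parent path itself is included; of
# the children matching 'ChromiumPerf/linux/sunspider/Total/*', only those
# whose last component is not 'ref' are included.
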
def _ValidateTestPatterns(old_pattern, new_pattern):
  tests = list_tests.GetTestsMatchingPattern(old_pattern, list_entities=True)
  for test in tests:
    old_path = utils.TestPath(test.key)
    _ValidateAndGetNewTestPath(old_path, new_pattern)

def _GetMastersForSuite(suite):
  masters = list_tests.GetTestsMatchingPattern('*/*/%s' % suite)
  masters = list(set([m.split('/')[0] for m in masters]))
  return masters

def get(self):
  """Gets the page for viewing recently added points.

  Request parameters:
    pattern: A test path pattern with asterisk wildcards (optional).

  Outputs:
    A page showing recently added points.
  """
  # Construct a query for recently added Row entities.
  query = graph_data.Row.query()
  query = query.order(-graph_data.Row.timestamp)

  # If a maximum number of tests was specified, use it; fall back on default.
  try:
    max_tests = int(self.request.get('max_tests', _MAX_MATCHING_TESTS))
  except ValueError:
    max_tests = _MAX_MATCHING_TESTS

  # If a test path pattern was specified, filter the query to include only
  # Row entities that belong to a test that matches the pattern.
  test_pattern = self.request.get('pattern')
  num_originally_matching_tests = 0
  if test_pattern:
    test_paths = list_tests.GetTestsMatchingPattern(
        test_pattern, only_with_rows=True)
    if not test_paths:
      self.RenderHtml('new_points.html', {
          'pattern': test_pattern,
          'error': 'No tests matching pattern: %s' % test_pattern,
      })
      return

    # If test_keys contains too many tests, then this query will exceed a
    # memory limit or time out. So, limit the number of tests and let the
    # user know that this has happened.
    num_originally_matching_tests = len(test_paths)
    if num_originally_matching_tests > max_tests:
      test_paths = test_paths[:max_tests]
    test_keys = map(utils.OldStyleTestKey, test_paths)
    query = query.filter(graph_data.Row.parent_test.IN(test_keys))

  # If a valid number of points was given, use it. Otherwise use the default.
  try:
    num_points = int(self.request.get('num_points', _DEFAULT_NUM_POINTS))
  except ValueError:
    num_points = _DEFAULT_NUM_POINTS

  # Fetch the Row entities.
  rows = query.fetch(limit=num_points)

  # Make a list of dicts which will be passed to the template.
  row_dicts = []
  for row in rows:
    row_dicts.append({
        'test': utils.TestPath(row.parent_test),
        'added_time': row.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z'),
        'revision': row.revision,
        'value': row.value,
        'error': row.error,
    })

  error_message = ''
  if num_originally_matching_tests > max_tests:
    error_message = (
        'Pattern originally matched %s tests; only showing '
        'points from the first %s tests.' %
        (num_originally_matching_tests, max_tests))

  # Render the template with the row information that was fetched.
  self.RenderHtml('new_points.html', {
      'pattern': test_pattern,
      'num_points': num_points,
      'max_tests': max_tests,
      'rows': row_dicts,
      'error': error_message,
  })

def _AllTestPathsMatchingPatterns(patterns_list):
  """Returns a list of all test paths matching the given list of patterns."""
  test_paths = set()
  for pattern in patterns_list:
    test_paths |= set(list_tests.GetTestsMatchingPattern(pattern))
  return sorted(test_paths)

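# Hypothetical example of the union above: both patterns can match the same
# test path, and the set collapses duplicates before sorting (paths are
# made up):
#
#   _AllTestPathsMatchingPatterns(
#       ['ChromiumPerf/*/sunspider/Total', 'ChromiumPerf/linux/*/Total'])
#   # 'ChromiumPerf/linux/sunspider/Total' matches both patterns but appears
#   # only once in the result.
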
def _CreateHealthReport(self, name, num_days, master):
  if not name:
    name = _DefaultReportName()
  report = benchmark_health_data.BenchmarkHealthReport(
      id=name, num_days=int(num_days), master=master)
  report.put()
  # Currently there are two ways to list benchmarks: what the dashboard
  # knows of, and what is in the go/chrome-benchmarks spreadsheet. In the
  # short term, list benchmarks from both sources.
  benchmark_names = set()
  test_paths = list_tests.GetTestsMatchingPattern('%s/*/*' % master)
  dashboard_benchmarks = set([p.split('/')[2] for p in test_paths])
  for benchmark in dashboard_benchmarks:
    benchmark_names.add(benchmark)
    benchmark_health_data.BenchmarkHealthData(
        parent=report.key, id=benchmark, name=benchmark).put()
  if master in ['ChromiumPerf', 'ClankInternal']:
    # These masters have owner information in the spreadsheet.
    spreadsheet_benchmarks = google_sheets_service.GetRange(
        _BENCHMARK_SHEET_ID, _BENCHMARK_SHEET_NAME, _BENCHMARK_RANGE)
    if not spreadsheet_benchmarks:
      logging.error('Failed to load go/chrome-benchmarks')
    else:
      for row in spreadsheet_benchmarks:
        if len(row) == 0:
          continue
        benchmark = row[0]
        owner = None
        if len(row) == 2:
          owner = row[1]
        benchmark_names.add(benchmark)
        data = ndb.Key(
            'BenchmarkHealthReport', name,
            'BenchmarkHealthData', benchmark).get()
        if not data:
          benchmark_health_data.BenchmarkHealthData(
              parent=report.key, id=benchmark,
              name=benchmark, owner=owner).put()
        else:
          data.owner = owner
          data.put()
  report.expected_num_benchmarks = len(benchmark_names)
  report.put()
  for benchmark_name in benchmark_names:
    params = {
        'benchmark': benchmark_name,
        'report_name': name,
        'num_days': num_days,
        'master': master,
    }
    taskqueue.add(
        url='/generate_benchmark_health_report',
        params=params,
        target=os.environ['CURRENT_VERSION_ID'].split('.')[0],
        queue_name=_TASK_QUEUE_NAME)

def _FillBenchmarkDetailsToHealthReport(
    self, benchmark_name, report_name, num_days, master):
  benchmark = ndb.Key(
      'BenchmarkHealthReport', report_name,
      'BenchmarkHealthData', benchmark_name).get()
  if not benchmark:
    return

  durations_pattern = '%s/*/%s/BenchmarkDuration' % (master, benchmark_name)
  test_paths = list_tests.GetTestsMatchingPattern(durations_pattern)
  futures = set()
  for test_path in test_paths:
    key = utils.OldStyleTestKey(test_path)
    query = graph_data.Row.query(graph_data.Row.parent_test == key)
    query = query.order(-graph_data.Row.revision)
    futures.add(query.get_async())
  while futures:
    f = ndb.Future.wait_any(futures)
    futures.remove(f)
    row = f.get_result()
    if not row:
      continue
    bot = utils.TestPath(row.parent_test).split('/')[1]
    benchmark.bots.append(
        benchmark_health_data.BotHealthData(
            name=bot, duration=row.value, last_update=row.timestamp))

  bug_ids = set()
  query = anomaly.Anomaly.query(
      anomaly.Anomaly.benchmark_name == benchmark_name,
      anomaly.Anomaly.master_name == master,
      anomaly.Anomaly.is_improvement == False,
      anomaly.Anomaly.timestamp >
      datetime.datetime.now() - datetime.timedelta(days=int(num_days)))
  query = query.order(-anomaly.Anomaly.timestamp)
  anomalies = query.fetch()
  for alert in anomalies:
    bug_id = alert.bug_id
    if bug_id and bug_id > 0:
      bug_ids.add(bug_id)
    benchmark.alerts.append(
        benchmark_health_data.AlertHealthData(
            bug_id=bug_id,
            test_path=utils.TestPath(alert.GetTestMetadataKey()),
            percent_changed=alert.GetDisplayPercentChanged(),
            absolute_delta=alert.GetDisplayAbsoluteChanged()))

  for bug_id in bug_ids:
    details = bug_details.GetBugDetails(bug_id, utils.ServiceAccountHttp())
    benchmark.bugs.append(
        benchmark_health_data.BugHealthData(
            bug_id=bug_id,
            num_comments=len(details['comments']),
            published=details['published'],
            state=details['state'],
            status=details['status'],
            summary=details['summary']))
    for review in details['review_urls']:
      benchmark.reviews.append(
          benchmark_health_data.ReviewData(review_url=review, bug_id=bug_id))
    for bisect in details['bisects']:
      benchmark.bisects.append(
          benchmark_health_data.BisectHealthData(
              bug_id=bug_id,
              buildbucket_link=bisect['buildbucket_link'],
              metric=bisect['metric'],
              status=bisect['status'],
              bot=bisect['bot']))

  benchmark.is_complete = True
  benchmark.put()