def GenerateAllGroupsForAnomaly(cls, anomaly_entity, sheriff_config=None,
                                subscriptions=None):
  """Creates one untriaged AlertGroup per subscription matching the anomaly.

  When `subscriptions` is not supplied, they are looked up through the
  sheriff config client using the anomaly's test path.
  """
  if subscriptions is None:
    client = sheriff_config or sheriff_config_client.GetSheriffConfigClient()
    subscriptions, _ = client.Match(
        anomaly_entity.test.string_id(), check=True)
  groups = []
  for subscription in subscriptions:
    # TODO(fancl): Support multiple group name
    groups.append(
        cls(
            id=str(uuid.uuid4()),
            name=anomaly_entity.benchmark_name,
            domain=anomaly_entity.master_name,
            subscription_name=subscription.name,
            project_id=subscription.monorail_project_id,
            status=cls.Status.untriaged,
            active=True,
            revision=RevisionRange(
                repository='chromium',
                start=anomaly_entity.start_revision,
                end=anomaly_entity.end_revision,
            ),
        ))
  return groups
def __init__(self,
             group,
             config=None,
             sheriff_config=None,
             issue_tracker=None,
             pinpoint=None,
             crrev=None,
             gitiles=None,
             revision_info=None,
             service_account=None):
  """Wires up the workflow's collaborators.

  Every dependency may be injected (for tests); each one that is left as
  None falls back to the production implementation.
  """
  self._group = group
  if config is None:
    config = self.Config(
        active_window=_ALERT_GROUP_ACTIVE_WINDOW,
        triage_delay=_ALERT_GROUP_TRIAGE_DELAY,
    )
  self._config = config
  if sheriff_config is None:
    sheriff_config = sheriff_config_client.GetSheriffConfigClient()
  self._sheriff_config = sheriff_config
  self._issue_tracker = issue_tracker or _IssueTracker()
  self._pinpoint = pinpoint or pinpoint_service
  self._crrev = crrev or crrev_service
  self._gitiles = gitiles or gitiles_service
  self._revision_info = revision_info or revision_info_client
  self._service_account = service_account or utils.ServiceAccountEmail
def GenerateAllGroupsForAnomaly(cls, anomaly_entity, sheriff_config=None):
  """Creates one untriaged AlertGroup per distinct monorail project.

  The projects are collected from the subscriptions that the sheriff
  config service matches against the anomaly's test path.
  """
  client = sheriff_config or sheriff_config_client.GetSheriffConfigClient()
  subscriptions, _ = client.Match(
      anomaly_entity.test.string_id(), check=True)
  # We want to create an issue per project if multiple subscriptions apply to
  # this anomaly that have different projects.
  projects = {s.monorail_project_id for s in subscriptions}
  groups = []
  for project in projects:
    # TODO(fancl): Support multiple group name
    groups.append(
        cls(
            id=str(uuid.uuid4()),
            name=anomaly_entity.benchmark_name,
            domain=anomaly_entity.master_name,
            project_id=project,
            status=cls.Status.untriaged,
            active=True,
            revision=RevisionRange(
                repository='chromium',
                start=anomaly_entity.start_revision,
                end=anomaly_entity.end_revision,
            ),
        ))
  return groups
def GenerateAllGroupsForAnomaly(cls, anomaly_entity, sheriff_config=None,
                                subscriptions=None):
  """Creates untriaged groups for every (subscription, group name) pair.

  Group names come from the anomaly's alert_grouping when set, otherwise
  the benchmark name is used as the single group name.
  """
  if subscriptions is None:
    client = sheriff_config or sheriff_config_client.GetSheriffConfigClient()
    subscriptions, _ = client.Match(
        anomaly_entity.test.string_id(), check=True)
  group_names = anomaly_entity.alert_grouping or [
      anomaly_entity.benchmark_name
  ]
  groups = []
  for subscription in subscriptions:
    for group_name in group_names:
      groups.append(
          cls(
              id=str(uuid.uuid4()),
              name=group_name,
              domain=anomaly_entity.master_name,
              subscription_name=subscription.name,
              project_id=subscription.monorail_project_id,
              status=cls.Status.untriaged,
              group_type=cls.GetType(anomaly_entity),
              active=True,
              revision=RevisionRange(
                  repository='chromium',
                  start=anomaly_entity.start_revision,
                  end=anomaly_entity.end_revision,
              ),
          ))
  return groups
def post(self):
  """Adds a set of points from the post data.

  Request parameters:
    data: JSON encoding of a list of dictionaries. Each dictionary represents
        one point to add. For each dict, one Row entity will be added, and
        any required TestMetadata or Master or Bot entities will be created.
  """
  datastore_hooks.SetPrivilegedRequest()
  data = json.loads(self.request.get('data'))
  _PrewarmGets(data)
  all_put_futures = []
  added_rows = []
  parent_tests = []
  for row_dict in data:
    try:
      new_row, parent_test, put_futures = _AddRow(row_dict)
      added_rows.append(new_row)
      parent_tests.append(parent_test)
      all_put_futures.extend(put_futures)
    except add_point.BadRequestError as e:
      # Log `e` directly: BaseException has no `.message` attribute on
      # Python 3, so `e.message` would itself raise here.
      logging.error('Could not add %s, it was invalid.', e)
    except datastore_errors.BadRequestError as e:
      logging.info('While trying to store %s', row_dict)
      logging.error('Datastore request failed: %s.', e)
  # BUG FIX: this used to be `return ndb.Future.wait_all(...)`, which made
  # everything below (subscription matching, anomaly processing and the
  # graph-revision cache update) unreachable dead code.
  ndb.Future.wait_all(all_put_futures)

  client = sheriff_config_client.GetSheriffConfigClient()
  tests_keys = []
  for t in parent_tests:
    reason = []
    subscriptions, _ = client.Match(t.test_path, check=True)
    if not subscriptions:
      reason.append('subscriptions')
    if not t.has_rows:
      reason.append('has_rows')
    if IsRefBuild(t.key):
      reason.append('RefBuild')
    if reason:
      logging.info('Skip test: %s reason=%s', t.key, ','.join(reason))
      continue
    logging.info('Process test: %s', t.key)
    tests_keys.append(t.key)
  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  futures = [
      graph_revisions.AddRowsToCacheAsync(added_rows),
      find_anomalies.ProcessTestsAsync(tests_keys)
  ]
  ndb.Future.wait_all(futures)
def _GetPreproccessedRegressions(anomalies):
  """Filters anomalies down to regressions and collects their subscriptions.

  Each regression is annotated with `auto_triage_enable`, true when any of
  its matched subscriptions enables auto triage. Returns a tuple of
  (regressions, deduplicated subscriptions).
  """
  sheriff_config = sheriff_config_client.GetSheriffConfigClient()
  regressions = [entity for entity in anomalies if not entity.is_improvement]
  subscriptions_by_name = {}
  for regression in regressions:
    matched, _ = sheriff_config.Match(regression.test.string_id(), check=True)
    for subscription in matched:
      subscriptions_by_name[subscription.name] = subscription
    regression.auto_triage_enable = any(
        subscription.auto_triage_enable for subscription in matched)
  return (regressions, subscriptions_by_name.values())
def __init__(self, group, config=None, sheriff_config=None,
             issue_tracker=None, pinpoint=None, crrev=None):
  """Stores the group and resolves each collaborator.

  Any dependency passed as None defaults to its production implementation,
  which keeps this constructor injectable for tests.
  """
  self._group = group
  if config is None:
    config = self.Config(
        active_window=_ALERT_GROUP_ACTIVE_WINDOW,
        triage_delay=_ALERT_GROUP_TRIAGE_DELAY,
    )
  self._config = config
  if sheriff_config is None:
    sheriff_config = sheriff_config_client.GetSheriffConfigClient()
  self._sheriff_config = sheriff_config
  self._issue_tracker = issue_tracker or _IssueTracker()
  self._pinpoint = pinpoint or pinpoint_service
  self._crrev = crrev or crrev_service
def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests):
  """Creates Row entities from histogram data and queues anomaly processing.

  This is an ndb tasklet body: `yield` waits on the futures it receives and
  `raise ndb.Return()` terminates the tasklet early.

  Args:
    params: Dict holding the histogram payload under the 'data' key.
    revision: Revision number for the new rows.
    parent_test: TestMetadata entity the rows belong to.
    legacy_parent_tests: Dict mapping stat name to a legacy TestMetadata
        entity — TODO confirm exact key semantics against callers.
  """
  data_dict = params['data']
  test_key = parent_test.key
  stat_names_to_test_keys = {
      k: v.key for k, v in legacy_parent_tests.items()
  }
  rows = CreateRowEntities(data_dict, test_key, stat_names_to_test_keys,
                           revision)
  if not rows:
    # Nothing to store; leave the tasklet without scheduling more work.
    raise ndb.Return()
  # Persist all rows and update each row's parent test concurrently.
  yield ndb.put_multi_async(rows) + [r.UpdateParentAsync() for r in rows]

  def IsMonitored(client, test):
    # Decides whether `test` should be processed for anomalies, logging the
    # reason whenever it is skipped.
    reason = []
    # Sampling knob for the sheriff-config lookup; 1.0 means the Match call
    # is always made and the legacy `test.sheriff` branch is never taken.
    request_sampling_percentage = 1.0
    if random.random() < request_sampling_percentage:
      subscriptions, _ = client.Match(test.test_path, check=True)
      if not subscriptions:
        reason.append('subscriptions')
    elif not test.sheriff:
      reason.append('sheriff')
    if not test.has_rows:
      reason.append('has_rows')
    if reason:
      logging.info('Skip test: %s reason=%s', test.key, ','.join(reason))
      return False
    logging.info('Process test: %s', test.key)
    return True

  client = sheriff_config_client.GetSheriffConfigClient()
  tests_keys = []
  if IsMonitored(client, parent_test):
    tests_keys.append(parent_test.key)
  for legacy_parent_test in legacy_parent_tests.values():
    if IsMonitored(client, legacy_parent_test):
      tests_keys.append(legacy_parent_test.key)
  # Reference builds are never processed for anomalies.
  tests_keys = [k for k in tests_keys if not add_point_queue.IsRefBuild(k)]
  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  futures = [
      graph_revisions.AddRowsToCacheAsync(rows),
      find_anomalies.ProcessTestsAsync(tests_keys)
  ]
  yield futures
def _GetPreproccessedRegressions(anomalies):
  """Annotates anomalies and keeps only unrecovered regressions.

  Every anomaly gets `auto_triage_enable` and `relative_delta` set; only
  those that are regressions and not recovered are returned, together with
  the deduplicated subscriptions seen across all anomalies.
  """
  sheriff_config = sheriff_config_client.GetSheriffConfigClient()
  regressions = []
  subscriptions_by_name = {}
  for entity in anomalies:
    matched, _ = sheriff_config.Match(entity.test.string_id(), check=True)
    for subscription in matched:
      subscriptions_by_name[subscription.name] = subscription
    # Only auto-triage if this is a regression.
    entity.auto_triage_enable = any(
        subscription.auto_triage_enable for subscription in matched)
    if entity.median_before_anomaly != 0.:
      entity.relative_delta = abs(
          entity.absolute_delta / float(entity.median_before_anomaly))
    else:
      entity.relative_delta = float('Inf')
    if not entity.is_improvement and not entity.recovered:
      regressions.append(entity)
  return (regressions, subscriptions_by_name.values())
def _AddAnomaly(self, **kargs):
  """Stores a test Anomaly built from defaults overridden by `kargs`.

  Also creates the TestMetadata entity for the anomaly's test and assigns
  the anomaly's alert groups before putting it. Returns the new key.
  """
  props = {
      'test': 'master/bot/test_suite/measurement/test_case',
      'start_revision': 0,
      'end_revision': 100,
      'is_improvement': False,
      'median_before_anomaly': 1.1,
      'median_after_anomaly': 1.3,
      'ownership': {
          'component': 'Foo>Bar',
          'emails': ['*****@*****.**', '*****@*****.**'],
      },
  }
  props.update(kargs)
  props['test'] = utils.TestKey(props['test'])
  graph_data.TestMetadata(key=props['test']).put()
  new_anomaly = anomaly.Anomaly(**props)
  client = sheriff_config_client.GetSheriffConfigClient()
  subscriptions, _ = client.Match(new_anomaly)
  new_anomaly.groups = alert_group.AlertGroup.GetGroupsForAnomaly(
      new_anomaly, subscriptions)
  return new_anomaly.put()
def _CreateHistogramTasks(suite_path, histograms, revision,
                          benchmark_description, completion_token=None):
  """Builds one upload task per histogram, rejecting duplicate test paths.

  Raises:
    api_request_handler.BadRequestError: if two histograms map to the same
        test path.
  """
  tasks = []
  seen_test_paths = set()
  measurement_add_futures = []
  sheriff_client = sheriff_config_client.GetSheriffConfigClient()
  for hist in histograms:
    diagnostics = FindHistogramLevelSparseDiagnostics(hist)
    test_path = '%s/%s' % (suite_path, histogram_helpers.ComputeTestPath(hist))
    # Log the information here so we can see which histograms are being queued.
    logging.debug('Queueing: %s', test_path)
    if test_path in seen_test_paths:
      raise api_request_handler.BadRequestError(
          'Duplicate histogram detected: %s' % test_path)
    seen_test_paths.add(test_path)
    # We create one task per histogram, so that we can get as much time as we
    # need for processing each histogram per task.
    task_dict = _MakeTaskDict(hist, test_path, revision,
                              benchmark_description, diagnostics,
                              completion_token)
    tasks.append(_MakeTask([task_dict]))
    if completion_token is not None:
      measurement_add_futures.append(
          completion_token.AddMeasurement(
              test_path, utils.IsMonitored(sheriff_client, test_path)))
  ndb.Future.wait_all(measurement_add_futures)
  return tasks
def _GetSheriffList():
  """Returns a list of sheriff names for all sheriffs in the datastore."""
  subscriptions, _ = (
      sheriff_config_client.GetSheriffConfigClient().List(check=True))
  return [subscription.name for subscription in subscriptions]
def testIsMonitored_Negative(self):
  # A test path with no matching subscription must not be reported
  # as monitored.
  client = sheriff_config_client.GetSheriffConfigClient()
  self.assertFalse(utils.IsMonitored(client, 'test'))