Example #1
    def post(self):
        """Starts migration of old TestMetadata entity names to new ones.

    The form that's used to kick off migrations will give the parameters
    old_pattern and new_pattern, which are both test path pattern strings.

    When this handler is called from the task queue, however, it will be given
    the parameters old_test_key and new_test_key, which should both be keys
    of TestMetadata entities in urlsafe form.
    """
        datastore_hooks.SetPrivilegedRequest()

        old_pattern = self.request.get('old_pattern')
        new_pattern = self.request.get('new_pattern')
        old_test_key = self.request.get('old_test_key')
        new_test_key = self.request.get('new_test_key')

        if old_pattern and new_pattern:
            try:
                _AddTasksForPattern(old_pattern, new_pattern)
                self.RenderHtml(
                    'result.html',
                    {'headline': 'Test name migration task started.'})
            except BadInputPatternError as error:
                self.ReportError('Error: %s' % error.message, status=400)
        elif old_test_key and new_test_key:
            _MigrateOldTest(old_test_key, new_test_key)
        else:
            self.ReportError(
                'Missing required parameters of /migrate_test_names.')
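As context for the pattern branch above, here is a minimal sketch of how a kick-off helper like _AddTasksForPattern might fan tasks back out to this same handler. The _GetMigrationPairs helper and the queue name are illustrative assumptions, not the dashboard's actual implementation.

from google.appengine.api import taskqueue

def _AddTasksForPattern(old_pattern, new_pattern):
  # Sketch: resolve the two patterns to (old, new) test key pairs, then
  # enqueue one task per pair with the keys in urlsafe form, matching the
  # parameters this handler expects from the task queue.
  for old_key, new_key in _GetMigrationPairs(old_pattern, new_pattern):
    taskqueue.add(
        url='/migrate_test_names',
        params={
            'old_test_key': old_key.urlsafe(),
            'new_test_key': new_key.urlsafe(),
        },
        queue_name='migrate-test-names-queue')  # placeholder queue name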
Example #2
    def get(self):
        """The get handler method is called from a cron job.

    It expects no parameters and has no output. It checks all current bisect try
    jobs and send comments to an issue on the issue tracker if a bisect job has
    completed.
    """
        issue_tracker = issue_tracker_service.IssueTrackerService(
            utils.ServiceAccountHttp())

        # Set privilege so we can also fetch internal try_job entities.
        datastore_hooks.SetPrivilegedRequest()

        jobs_to_check = try_job.TryJob.query(
            try_job.TryJob.status.IN(['started', 'pending'])).fetch()
        all_successful = True

        for job in jobs_to_check:
            try:
                _CheckJob(job, issue_tracker)
            except Exception as e:  # pylint: disable=broad-except
                logging.error('Caught Exception %s: %s\n%s',
                              type(e).__name__, e, traceback.format_exc())
                all_successful = False

        if all_successful:
            utils.TickMonitoringCustomMetric('UpdateBugWithResults')
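For illustration, a hedged sketch of what each _CheckJob call might do. AddBugComment mirrors how IssueTrackerService is commonly used, but the job fields and the completion check below are assumptions, not the dashboard's confirmed schema.

def _CheckJob(job, issue_tracker):
  # Sketch only: skip jobs that are still running, otherwise report the
  # outcome on the job's bug. job.bug_id is an assumed field name.
  if job.status not in ('completed', 'failed'):
    return
  issue_tracker.AddBugComment(
      job.bug_id, 'Bisect job finished with status: %s' % job.status)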
Example #3
def Authorize():
  try:
    email = utils.GetEmail()
  except oauth.OAuthRequestError:
    raise OAuthError

  if not email:
    raise NotLoggedInError

  try:
    if not email.endswith('.gserviceaccount.com'):
      # For non-service accounts, need to verify that the OAuth client ID
      # is in our whitelist.
      client_id = oauth.get_client_id(utils.OAUTH_SCOPES)
      if client_id not in OAUTH_CLIENT_ID_WHITELIST:
        logging.error('OAuth client id %s for user %s not in whitelist',
                      client_id, email)
        email = None
        raise OAuthError
  except oauth.OAuthRequestError:
    # Transient errors when checking the token result should result in HTTP 500,
    # so catch oauth.OAuthRequestError here, not oauth.Error (which would catch
    # both fatal and transient errors).
    raise OAuthError

  logging.info('OAuth user logged in as: %s', email)
  if utils.IsInternalUser():
    datastore_hooks.SetPrivilegedRequest()
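The except clause above relies on the App Engine oauth exception hierarchy: oauth.OAuthRequestError covers invalid requests, while the transient oauth.OAuthServiceFailureError derives from oauth.Error without being an OAuthRequestError, so it escapes the handler and surfaces as an HTTP 500. A minimal sketch of that distinction, assuming the standard google.appengine.api.oauth exceptions (utils.OAUTH_SCOPES and OAuthError come from the example above):

from google.appengine.api import oauth

def _GetClientId():
  try:
    return oauth.get_client_id(utils.OAUTH_SCOPES)
  except oauth.OAuthRequestError:
    # Invalid or malformed token/request: reject deliberately.
    raise OAuthError
  # A transient oauth.OAuthServiceFailureError is intentionally NOT caught;
  # it propagates and the request fails with HTTP 500, so clients can retry.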
Example #4
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        try:
            data_str = self.request.get('data')
            if not data_str:
                raise api_request_handler.BadRequestError(
                    'Missing "data" parameter')

            logging.info('Received data: %s', data_str)

            histogram_dicts = json.loads(data_str)
            ProcessHistogramSet(histogram_dicts)
        except api_request_handler.BadRequestError:
            # TODO(simonhatch, eakuefner): Remove this later.
            # When this has all stabilized a bit, remove and let this 400 to
            # clients, but for now, to prevent the waterfall from re-uploading
            # over and over while we bug fix, let's just log the error.
            # https://github.com/catapult-project/catapult/issues/4019
            logging.error(traceback.format_exc())
        except Exception:  # pylint: disable=broad-except
            # TODO(simonhatch, eakuefner): Remove this later.
            # We shouldn't be catching ALL exceptions; this is just while the
            # stability of the endpoint is being worked on.
            logging.error(traceback.format_exc())
Example #5
def Authorize():
    try:
        email = utils.GetEmail()
    except oauth.OAuthRequestError:
        raise OAuthError

    if not email:
        raise NotLoggedInError

    try:
        # TODO(dberris): Migrate to using Cloud IAM and checking roles instead, to
        # allow for dynamic management of the accounts.
        if not email.endswith('.gserviceaccount.com'):
            # For non-service accounts, need to verify that the OAuth client ID
            # is in our allowlist.
            client_id = oauth.get_client_id(utils.OAUTH_SCOPES)
            if client_id not in OAUTH_CLIENT_ID_ALLOWLIST:
                logging.error(
                    'OAuth client id %s for user %s not in allowlist',
                    client_id, email)
                email = None
                raise OAuthError
    except oauth.OAuthRequestError:
        # Transient errors when checking the token result should result in HTTP 500,
        # so catch oauth.OAuthRequestError here, not oauth.Error (which would catch
        # both fatal and transient errors).
        raise OAuthError

    logging.info('OAuth user logged in as: %s', email)
    if utils.IsInternalUser():
        datastore_hooks.SetPrivilegedRequest()
Example #6
def Authorize():
    try:
        user = oauth.get_current_user(OAUTH_SCOPES)
    except oauth.Error:
        raise NotLoggedInError

    if not user:
        raise NotLoggedInError

    try:
        if not user.email().endswith('.gserviceaccount.com'):
            # For non-service accounts, need to verify that the OAuth client ID
            # is in our whitelist.
            client_id = oauth.get_client_id(OAUTH_SCOPES)
            if client_id not in OAUTH_CLIENT_ID_WHITELIST:
                logging.info('OAuth client id %s for user %s not in whitelist',
                             client_id, user.email())
                user = None
                raise OAuthError
    except oauth.Error:
        raise OAuthError

    logging.info('OAuth user logged in as: %s', user.email())
    if utils.IsGroupMember(user.email(), 'chromeperf-access'):
        datastore_hooks.SetPrivilegedRequest()
Example #7
    def post(self):
        """Recursively deletes TestMetadata and Row data.

    The form that's used to kick off migrations will give the parameter
    pattern, which is a test path pattern string.

    When this handler is called from the task queue, however, it will be given
    the parameter test_key, which should be a key of a TestMetadata entity in
    urlsafe form.
    """
        datastore_hooks.SetPrivilegedRequest()

        pattern = self.request.get('pattern')
        test_key = self.request.get('test_key')
        notify = self.request.get('notify', 'true').lower() == 'true'

        if pattern:
            try:
                _AddTasksForPattern(pattern, notify)
                self.RenderHtml('result.html',
                                {'headline': 'Test deletion task started.'})
            except BadInputPatternError as error:
                self.ReportError('Error: %s' % error.message, status=400)
        elif test_key:
            _DeleteTest(test_key, notify)
        else:
            self.ReportError(
                'Missing required parameters of /delete_test_data.')
Example #8
  def get(self):
    """Emails sheriffs about new stoppage alerts."""
    datastore_hooks.SetPrivilegedRequest()
    sheriffs_to_email_query = sheriff.Sheriff.query(
        sheriff.Sheriff.stoppage_alert_delay > 0)
    for sheriff_entity in sheriffs_to_email_query:
      _SendStoppageAlertEmail(sheriff_entity)
Example #9
  def get(self):
    """Emails sheriffs with anomalies identified in most-recent 24 hours."""

    if self.request.get('internal_only') == '1':
      datastore_hooks.SetPrivilegedRequest()
      _QueryAndSendSummaryEmails(True)
    else:
      _QueryAndSendSummaryEmails(False)
Example #10
  def post(self):
    """Runs auto bisects."""
    if 'stats' in self.request.query_string:
      self.RenderHtml('result.html', _PrintStartedAndFailedBisectJobs())
      return
    datastore_hooks.SetPrivilegedRequest()
    if _RestartFailedBisectJobs():
      utils.TickMonitoringCustomMetric('RestartFailedBisectJobs')
Example #11
    def post(self):
        """Adds a set of points from the post data.

    Request parameters:
      data: JSON encoding of a list of dictionaries. Each dictionary represents
          one point to add. For each dict, one Row entity will be added, and
          any required TestMetadata or Master or Bot entities will be created.
    """
        datastore_hooks.SetPrivilegedRequest()

        data = json.loads(self.request.get('data'))
        _PrewarmGets(data)

        all_put_futures = []
        added_rows = []
        parent_tests = []
        for row_dict in data:
            try:
                new_row, parent_test, put_futures = _AddRow(row_dict)
                added_rows.append(new_row)
                parent_tests.append(parent_test)
                all_put_futures.extend(put_futures)

            except add_point.BadRequestError as e:
                logging.error('Could not add %s, it was invalid.', e.message)
            except datastore_errors.BadRequestError as e:
                logging.info('While trying to store %s', row_dict)
                logging.error('Datastore request failed: %s.', e.message)
                return

        ndb.Future.wait_all(all_put_futures)

        client = sheriff_config_client.GetSheriffConfigClient()
        tests_keys = []
        for t in parent_tests:
            reason = []
            subscriptions, _ = client.Match(t.test_path, check=True)
            if not subscriptions:
                reason.append('subscriptions')
            if not t.has_rows:
                reason.append('has_rows')
            if IsRefBuild(t.key):
                reason.append('RefBuild')
            if reason:
                logging.info('Skip test: %s reason=%s', t.key,
                             ','.join(reason))
                continue
            logging.info('Process test: %s', t.key)
            tests_keys.append(t.key)

        # Updating of the cached graph revisions should happen after put because
        # it requires the new row to have a timestamp, which happens upon put.
        futures = [
            graph_revisions.AddRowsToCacheAsync(added_rows),
            find_anomalies.ProcessTestsAsync(tests_keys)
        ]
        ndb.Future.wait_all(futures)
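IsRefBuild is not shown in this example; a plausible sketch, assuming the dashboard convention that reference builds end their test path in "ref" or "_ref" (the real helper may differ):

def IsRefBuild(test_key):
  # Sketch under the stated assumption. utils.TestPath is the same helper
  # used in the other examples on this page.
  last_part = utils.TestPath(test_key).split('/')[-1]
  return last_part == 'ref' or last_part.endswith('_ref')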
Example #12
    def post(self):
        """Adds a single histogram or sparse shared diagnostic to the datastore.

    The |data| request parameter can be either a histogram or a sparse shared
    diagnostic; the set of diagnostics that are considered sparse (meaning that
    they don't normally change on every upload for a given benchmark from a
    given bot) is shown in histogram_helpers.SPARSE_DIAGNOSTIC_TYPES.

    See https://goo.gl/lHzea6 for detailed information on the JSON format for
    histograms and diagnostics.

    Request parameters:
      data: JSON encoding of a histogram or shared diagnostic.
      revision: a revision, given as an int.
      test_path: the test path to which this diagnostic or histogram should be
          attached.
    """
        datastore_hooks.SetPrivilegedRequest()

        params = json.loads(self.request.body)

        _PrewarmGets(params)

        # Roughly, the processing of histograms and the processing of rows can be
        # done in parallel since there are no dependencies.

        histogram_futures = []
        token_state_futures = []

        try:
            for p in params:
                histogram_futures.append((p, _ProcessRowAndHistogram(p)))
        except Exception as e:  # pylint: disable=broad-except
            for param, futures_info in itertools.izip_longest(
                    params, histogram_futures):
                if futures_info is not None:
                    continue
                token_state_futures.append(
                    upload_completion_token.Measurement.UpdateStateByIdAsync(
                        param.get('test_path'), param.get('token'),
                        upload_completion_token.State.FAILED, e.message))
            ndb.Future.wait_all(token_state_futures)
            raise

        for info, futures in histogram_futures:
            operation_state = upload_completion_token.State.COMPLETED
            error_message = None
            for f in futures:
                exception = f.get_exception()
                if exception is not None:
                    operation_state = upload_completion_token.State.FAILED
                    error_message = exception.message
            token_state_futures.append(
                upload_completion_token.Measurement.UpdateStateByIdAsync(
                    info.get('test_path'), info.get('token'), operation_state,
                    error_message))
        ndb.Future.wait_all(token_state_futures)
Example #13
  def AuthorizedPost(self):
    datastore_hooks.SetPrivilegedRequest()

    data_str = self.request.get('data')
    if not data_str:
      raise api_request_handler.BadRequestError('Missing "data" parameter')

    histogram_dicts = json.loads(data_str)
    ProcessHistogramSet(histogram_dicts)
Example #14
def DeprecateTestsMapper(entity):
  """Marks a TestMetadata entity as deprecated if the last row is too old.

  What is considered "too old" is defined by _DEPRECATION_REVISION_DELTA. Also,
  if all of the subtests in a test have been marked as deprecated, then that
  parent test will be marked as deprecated.

  This mapper doesn't un-deprecate tests if new data has been added; that
  happens in add_point.py.

  Args:
    entity: The TestMetadata entity to check.

  Yields:
    Zero or more datastore mutation operations.
  """
  # Fetch the last row.
  datastore_hooks.SetPrivilegedRequest()
  query = graph_data.Row.query(
      graph_data.Row.parent_test == utils.OldStyleTestKey(entity.key))
  query = query.order(-graph_data.Row.timestamp)
  last_row = query.get()

  # Check if the test should be deleted entirely.
  now = datetime.datetime.now()
  logging.info('checking %s', entity.test_path)
  if not last_row or last_row.timestamp < now - _REMOVAL_REVISON_DELTA:
    descendants = list_tests.GetTestDescendants(entity.key, keys_only=True)
    if entity.key in descendants:
      descendants.remove(entity.key)
    if not descendants:
      logging.info('removing')
      if last_row:
        logging.info('last row timestamp: %s', last_row.timestamp)
      else:
        logging.info('no last row, no descendants')
      taskqueue.add(
          url='/delete_test_data',
          params={
              'test_path': utils.TestPath(entity.key),  # For manual inspection.
              'test_key': entity.key.urlsafe(),
              'notify': 'false',
          },
          queue_name=_DELETE_TASK_QUEUE_NAME)
      return

  if entity.deprecated or not last_row:
    return

  if last_row.timestamp < now - _DEPRECATION_REVISION_DELTA:
    for operation in _MarkDeprecated(entity):
      yield operation

  for operation in _CreateStoppageAlerts(entity, last_row):
    yield operation
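Since this mapper yields datastore mutation operations, _MarkDeprecated presumably wraps its writes in the mapreduce library's operation objects rather than calling put() directly. A hedged sketch, assuming the appengine-mapreduce operation API:

from mapreduce import operation as op

def _MarkDeprecated(test):
  # Sketch: flip the flag and let the mapper framework batch the write.
  test.deprecated = True
  yield op.db.Put(test)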
Example #15
    def post(self):
        """Adds a single histogram or sparse shared diagnostic to the datastore.

    The |data| request parameter can be either a histogram or a sparse shared
    diagnostic; the set of diagnostics that are considered sparse (meaning that
    they don't normally change on every upload for a given benchmark from a
    given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

    See https://goo.gl/lHzea6 for detailed information on the JSON format for
    histograms and diagnostics.

    Request parameters:
      data: JSON encoding of a histogram or shared diagnostic.
      revision: a revision, given as an int.
      test_path: the test path to which this diagnostic or histogram should be
          attached.
    """
        datastore_hooks.SetPrivilegedRequest()

        data = self.request.get('data')
        revision = int(self.request.get('revision'))
        test_path = self.request.get('test_path')

        data_dict = json.loads(data)
        guid = data_dict['guid']
        is_diagnostic = 'type' in data_dict

        test_path_parts = test_path.split('/')
        master = test_path_parts[0]
        bot = test_path_parts[1]
        test_name = '/'.join(test_path_parts[2:])
        bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
        internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)
        extra_args = {} if is_diagnostic else GetUnitArgs(data_dict['unit'])
        # TODO(eakuefner): Populate benchmark_description once it appears in
        # diagnostics.
        test_key = add_point_queue.GetOrCreateAncestors(
            master, bot, test_name, internal_only, **extra_args).key

        if is_diagnostic:
            entity = histogram.SparseDiagnostic(id=guid,
                                                data=data,
                                                test=test_key,
                                                start_revision=revision,
                                                end_revision=revision,
                                                internal_only=internal_only)
        else:
            entity = histogram.Histogram(id=guid,
                                         data=data,
                                         test=test_key,
                                         revision=revision,
                                         internal_only=internal_only)
            AddRow(data_dict, test_key, revision, test_path, internal_only)

        entity.put()
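GetUnitArgs is referenced but not shown; a hedged sketch of what it might do, assuming units carry an improvement-direction suffix (e.g. "ms_smallerIsBetter") and that the anomaly module defines UP, DOWN, and UNKNOWN constants:

def GetUnitArgs(unit):
  # Sketch only: the suffix convention and constant names are assumptions.
  unit_args = {'units': unit}
  direction = unit.split('_')[-1]
  if direction == 'biggerIsBetter':
    unit_args['improvement_direction'] = anomaly.UP
  elif direction == 'smallerIsBetter':
    unit_args['improvement_direction'] = anomaly.DOWN
  else:
    unit_args['improvement_direction'] = anomaly.UNKNOWN
  return unit_args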
Example #16
  def post(self):
    """Refreshes the cached test suites list."""
    if self.request.get('internal_only') == 'true':
      logging.info('Going to update internal-only test suites data.')
      # Update internal-only test suites data.
      datastore_hooks.SetPrivilegedRequest()
      UpdateTestSuites(datastore_hooks.INTERNAL)
    else:
      logging.info('Going to update externally-visible test suites data.')
      # Update externally-visible test suites data.
      UpdateTestSuites(datastore_hooks.EXTERNAL)
Example #17
    def post(self):
        """Updates the selected bots internal_only property.

    POST requests will be made by the task queue; tasks are added to the task
    queue either by a kick-off POST from the front-end form, or by this handler
    itself.

    Request parameters:
      internal_only: "true" if turning on internal_only, else "false".
      bots: Bots to update. Multiple bots parameters are possible; the value
          of each should be a string like "MasterName/platform-name".
      test: An urlsafe Key for a TestMetadata entity.
      cursor: An urlsafe Cursor; this parameter is only given if we're part-way
          through processing a Bot or a TestMetadata.

    Outputs:
      A message to the user if this request was started by the web form,
      or an error message if something went wrong, or nothing.
    """
        # /change_internal_only should be only accessible if one has administrator
        # privileges, so requests are guaranteed to be authorized.
        datastore_hooks.SetPrivilegedRequest()

        internal_only_string = self.request.get('internal_only')
        if internal_only_string == 'true':
            internal_only = True
        elif internal_only_string == 'false':
            internal_only = False
        else:
            self.ReportError('No internal_only field')
            return

        bot_names = self.request.get_all('bots')
        test_key_urlsafe = self.request.get('test')
        cursor = self.request.get('cursor', None)

        if bot_names and len(bot_names) > 1:
            self._UpdateMultipleBots(bot_names, internal_only)
            self.RenderHtml(
                'result.html', {
                    'headline':
                    ('Updating internal_only. This may take some time '
                     'depending on the data to update. Check the task queue '
                     'to determine whether the job is still in progress.'),
                })
        elif bot_names and len(bot_names) == 1:
            self._UpdateBot(bot_names[0], internal_only, cursor=cursor)
        elif test_key_urlsafe:
            self._UpdateTest(test_key_urlsafe, internal_only, cursor=cursor)
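The cursor parameter in the docstring suggests the standard App Engine pattern of processing one page of entities per task and re-enqueueing the remainder before the request deadline. A minimal sketch of what the continuation inside _UpdateBot might look like; the query shape, property names, page size, and queue name are all illustrative assumptions:

def _UpdateTestsForBot(bot_name, internal_only, cursor=None):
  # Sketch: update one page of TestMetadata entities for the bot, then hand
  # the datastore cursor to a follow-up task.
  master, bot = bot_name.split('/')
  query = graph_data.TestMetadata.query(
      graph_data.TestMetadata.master_name == master,
      graph_data.TestMetadata.bot_name == bot)  # assumed property names
  tests, next_cursor, more = query.fetch_page(500, start_cursor=cursor)
  for test in tests:
    test.internal_only = internal_only
  ndb.put_multi(tests)
  if more:
    taskqueue.add(
        url='/change_internal_only',
        params={
            'internal_only': 'true' if internal_only else 'false',
            'bots': bot_name,
            'cursor': next_cursor.urlsafe(),
        },
        queue_name='default')  # placeholder queue name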
Example #18
    def post(self):
        datastore_hooks.SetPrivilegedRequest()

        data_str = self.request.get('data')
        if not data_str:
            self.ReportError('Missing "data" parameter', status=400)
            return

        try:
            histogram_dicts = json.loads(data_str)
            ProcessHistogramSet(histogram_dicts)
        except ValueError:
            self.ReportError('Invalid JSON string', status=400)
        except BadRequestError as e:
            self.ReportError(e.message, status=400)
Example #19
  def AuthorizedPost(self):
    datastore_hooks.SetPrivilegedRequest()

    try:
      data_str = zlib.decompress(self.request.body)
      logging.info('Received compressed data.')
    except zlib.error:
      data_str = self.request.get('data')
      logging.info('Received uncompressed data.')
    if not data_str:
      raise api_request_handler.BadRequestError('Missing "data" parameter')

    logging.info('Received data: %s', data_str[:100])

    histogram_dicts = json.loads(data_str)
    ProcessHistogramSet(histogram_dicts)
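Because this handler tries zlib.decompress() on the raw request body before falling back to the "data" form parameter, a client can shrink large histogram sets before upload. A hedged client-side sketch:

import json
import zlib

def PackHistogramSet(histogram_dicts):
  # Compress the JSON payload; the handler above transparently accepts
  # either this compressed body or a plain "data" form parameter.
  return zlib.compress(json.dumps(histogram_dicts))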
Example #20
    def post(self):
        """Adds a set of points from the post data.

    Request parameters:
      data: JSON encoding of a list of dictionaries. Each dictionary represents
          one point to add. For each dict, one Row entity will be added, and
          any required TestMetadata or Master or Bot entities will be created.
    """
        datastore_hooks.SetPrivilegedRequest()

        data = json.loads(self.request.get('data'))
        _PrewarmGets(data)

        bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)

        all_put_futures = []
        added_rows = []
        parent_tests = []
        for row_dict in data:
            try:
                new_row, parent_test, put_futures = _AddRow(
                    row_dict, bot_whitelist)
                added_rows.append(new_row)
                parent_tests.append(parent_test)
                all_put_futures.extend(put_futures)

            except add_point.BadRequestError as e:
                logging.error('Could not add %s, it was invalid.', e.message)
            except datastore_errors.BadRequestError as e:
                logging.info('While trying to store %s', row_dict)
                logging.error('Datastore request failed: %s.', e.message)
                return

        ndb.Future.wait_all(all_put_futures)

        monitored_test_keys = [
            t.key for t in parent_tests if t.sheriff and t.has_rows
        ]
        tests_keys = [k for k in monitored_test_keys if not IsRefBuild(k)]

        # Updating of the cached graph revisions should happen after put because
        # it requires the new row to have a timestamp, which happens upon put.
        futures = [
            graph_revisions.AddRowsToCacheAsync(added_rows),
            find_anomalies.ProcessTestsAsync(tests_keys)
        ]
        ndb.Future.wait_all(futures)
Example #21
    def post(self):
        """Validates data parameter and saves to TryJob entity.

    Bisect results come from a "data" parameter, which is a JSON encoding of a
    dictionary.

    The required fields are "master", "bot", "test".

    Request parameters:
      data: JSON encoding of a dictionary.

    Outputs:
      Empty 200 response with if successful,
      200 response with warning message if optional data is invalid,
      403 response with error message if sender IP is not white-listed,
      400 response with error message if required data is invalid.
      500 with error message otherwise.
    """
        datastore_hooks.SetPrivilegedRequest()
        if not self._CheckIpAgainstWhitelist():
            return

        data = self.request.get('data')
        if not data:
            self.ReportError('Missing "data" parameter.', status=400)
            return

        logging.info('Received data: %s', data)

        try:
            data = json.loads(data)
        except ValueError:
            self.ReportError('Invalid JSON string.', status=400)
            return

        try:
            _ValidateResultsData(data)
            job = _GetTryJob(data)
            if not job:
                self.ReportWarning('No try job found.')
                return
            _UpdateTryJob(job, data)
            update_bug_with_results.UpdateQuickLog(job, in_progress=True)
        except BadRequestError as error:
            self.ReportError(error.message, status=400)
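Per the docstring, "master", "bot", and "test" are the required fields, so _ValidateResultsData plausibly reduces to presence checks like the sketch below; the real validator may check more than this.

def _ValidateResultsData(results_data):
  # Sketch grounded only in the docstring's list of required fields.
  for field in ('master', 'bot', 'test'):
    if not results_data.get(field):
      raise BadRequestError('No "%s" field in results data.' % field)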
Example #22
  def post(self):
    datastore_hooks.SetPrivilegedRequest()

    task_type = self.request.get('type')
    if not task_type or task_type == 'fetch-and-process-tests':
      start_cursor = self.request.get('start_cursor', None)
      if start_cursor:
        start_cursor = Cursor(urlsafe=start_cursor)
      _DeprecateTestsTask(start_cursor)
      return

    if task_type == 'deprecate-test':
      test_key = ndb.Key(urlsafe=self.request.get('test_key'))
      _MarkTestDeprecated(test_key)
      return

    logging.error(
        'Unknown task_type posted to /deprecate_tests: %s', task_type)
Example #23
    def post(self):
        """Starts migration of old TestMetadata entity names to new ones.

    The form that's used to kick off migrations will give the parameters
    old_pattern and new_pattern, which are both test path pattern strings.

    When this handler is called from the task queue, however, it will be given
    the parameters old_test_key and new_test_key, which should both be keys
    of TestMetadata entities in urlsafe form.
    """
        datastore_hooks.SetPrivilegedRequest()

        status = self.request.get('status')

        if not status:
            try:
                old_pattern = self.request.get('old_pattern')
                new_pattern = self.request.get('new_pattern')
                _MigrateTestBegin(old_pattern, new_pattern)
                self.RenderHtml(
                    'result.html',
                    {'headline': 'Test name migration task started.'})
            except BadInputPatternError as error:
                self.ReportError('Error: %s' % error.message, status=400)
        elif status == _MIGRATE_TEST_LOOKUP_PATTERNS:
            old_pattern = self.request.get('old_pattern')
            new_pattern = self.request.get('new_pattern')
            _MigrateTestLookupPatterns(old_pattern, new_pattern)
        elif status == _MIGRATE_TEST_CREATE:
            old_test_key = ndb.Key(urlsafe=self.request.get('old_test_key'))
            new_test_key = ndb.Key(urlsafe=self.request.get('new_test_key'))
            _MigrateTestCreateTest(old_test_key, new_test_key)
        elif status == _MIGRATE_TEST_COPY_DATA:
            old_test_key = ndb.Key(urlsafe=self.request.get('old_test_key'))
            new_test_key = ndb.Key(urlsafe=self.request.get('new_test_key'))
            _MigrateTestCopyData(old_test_key, new_test_key)
        else:
            # With the branches flattened, this error is now reachable for an
            # unrecognized status value.
            self.ReportError(
                'Missing required parameters of /migrate_test_names.')
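The status values chain the migration through distinct task-queue phases. The constants and the re-enqueueing helper below are illustrative sketches only; the dashboard's actual values and helper names may differ.

# Assumed phase markers; actual string values may differ.
_MIGRATE_TEST_LOOKUP_PATTERNS = 'lookup-patterns'
_MIGRATE_TEST_CREATE = 'create-test'
_MIGRATE_TEST_COPY_DATA = 'copy-data'

def _QueueNextPhase(status, old_test_key, new_test_key):
  # Sketch: each phase posts the next one back to this same handler.
  taskqueue.add(
      url='/migrate_test_names',
      params={
          'status': status,
          'old_test_key': old_test_key.urlsafe(),
          'new_test_key': new_test_key.urlsafe(),
      },
      queue_name='migrate-test-names-queue')  # placeholder queue name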
Example #24
    def post(self):
        """Checks if alerts have recovered, and marks them if so.

    This includes checking untriaged alerts, as well as alerts associated with
    open bugs..
    """
        datastore_hooks.SetPrivilegedRequest()

        # Handle task queue requests.
        bug_id = self.request.get('bug_id')
        project_id = self.request.get('project_id')
        if bug_id:
            bug_id = int(bug_id)
        if not project_id:
            project_id = 'chromium'
        if self.request.get('check_alert'):
            self.MarkAlertAndBugIfRecovered(self.request.get('alert_key'),
                                            bug_id, project_id)
            return
        if self.request.get('check_bug'):
            self.CheckRecoveredAlertsForBug(bug_id, project_id)
            return

        # Kick off task queue jobs for untriaged anomalies.
        alerts = self._FetchUntriagedAnomalies()
        logging.info('Kicking off tasks for %d alerts', len(alerts))
        for alert in alerts:
            taskqueue.add(url='/mark_recovered_alerts',
                          params={
                              'check_alert': 1,
                              'alert_key': alert.urlsafe()
                          },
                          queue_name=_TASK_QUEUE_NAME)

        # Kick off task queue jobs for open bugs.
        bugs = self._FetchOpenBugs()
        logging.info('Kicking off tasks for %d bugs', len(bugs))
        for bug in bugs:
            taskqueue.add(url='/mark_recovered_alerts',
                          params={
                              'check_bug': 1,
                              'bug_id': bug['id']
                          },
                          queue_name=_TASK_QUEUE_NAME)
Example #25
def _UpdateDescriptor(test_suite, namespace):
    logging.info('%s %s', test_suite, namespace)
    # This function always runs in the taskqueue as an anonymous user.
    if namespace == datastore_hooks.INTERNAL:
        datastore_hooks.SetPrivilegedRequest()

    desc = descriptor.Descriptor(test_suite=test_suite, bot='place:holder')
    test_path = list(desc.ToTestPathsSync())[0].split('/')

    measurements = set()
    bots = set()
    cases = set()
    # TODO(4549) Tagmaps.

    query = graph_data.TestMetadata.query()
    query = query.filter(graph_data.TestMetadata.suite_name == test_path[2])
    if len(test_path) > 3:
        # test_suite is composite.
        query = query.filter(
            graph_data.TestMetadata.test_part1_name == test_path[3])
    query = query.filter(graph_data.TestMetadata.deprecated == False)
    query = query.filter(graph_data.TestMetadata.has_rows == True)

    # Use an iterator because some test suites have more keys than can fit in
    # memory.
    for key in query.iter(keys_only=True):
        desc = descriptor.Descriptor.FromTestPathSync(utils.TestPath(key))
        bots.add(desc.bot)
        if desc.measurement:
            measurements.add(desc.measurement)
        if desc.test_case:
            cases.add(desc.test_case)

    logging.info('%d measurements, %d bots, %d cases', len(measurements),
                 len(bots), len(cases))
    desc = {
        'measurements': list(sorted(measurements)),
        'bots': list(sorted(bots)),
        'cases': list(sorted(cases)),
    }
    key = namespaced_stored_object.NamespaceKey(CacheKey(test_suite),
                                                namespace)
    stored_object.Set(key, desc)
Example #26
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        try:
            data_str = self.request.get('data')
            if not data_str:
                raise api_request_handler.BadRequestError(
                    'Missing "data" parameter')

            logging.info('Received data: %s', data_str)

            histogram_dicts = json.loads(data_str)
            ProcessHistogramSet(histogram_dicts)
        except api_request_handler.BadRequestError as e:
            # TODO(simonhatch, eakuefner): Remove this later.
            # When this has all stabilized a bit, remove and let this 400 to
            # clients, but for now, to prevent the waterfall from re-uploading
            # over and over while we bug fix, let's just log the error.
            # https://github.com/catapult-project/catapult/issues/4019
            logging.error(e.message)
Example #27
  def post(self):
    """Saves the given entities."""
    datastore_hooks.SetPrivilegedRequest()
    urlsafe_keys = self.request.get('keys').split(',')
    keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys]
    results = ndb.get_multi(keys)

    tests = []
    entities = []

    for e in results:
      if e.key.kind() == 'TestMetadata':
        tests.append(e)
      else:
        entities.append(e)

    for t in tests:
      t.UpdateSheriff()
      t.put()

    ndb.put_multi(entities)
Example #28
    def post(self):
        """Endpoint to create a new health report.

    When there is a request to create a new health report, it's made with:
      report_name: Optional name of report, defaults to date
      num_days: Optional number of days to report on, defaults to 90
      master: Optional master to report on, defaults to ChromiumPerf


    Since querying all the different alerts bugs, etc. required to create the
    report takes quite a while, the entry point to create a new health report
    queues up tasks to this same endpoint which fill in the details, with:
      benchmark: The name of the benchmark to fill in.
    """
        datastore_hooks.SetPrivilegedRequest()

        # This is the entry point for tasks which have already been queued up
        # for individual benchmarks. If the benchmark name is specified, fill in
        # report data for the benchmark.
        benchmark = self.request.get('benchmark')
        if benchmark:
            report_name = self.request.get('report_name')
            if not report_name:
                self.ReportError('No name for report')
                return
            num_days = self.request.get('num_days')
            if not num_days:
                self.ReportError('No number of days for report')
                return
            self._FillBenchmarkDetailsToHealthReport(
                benchmark, report_name, num_days,
                self.request.get('master', 'ChromiumPerf'))
            return

        # This is called for requests to create a new health report. It creates
        # taskqueue tasks which queue up smaller tasks with benchmark args
        # which fill in details for individual benchmarks.
        self._CreateHealthReport(self.request.get('report_name'),
                                 self.request.get('num_days', '90'),
                                 self.request.get('master', 'ChromiumPerf'))
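A hedged sketch of the fan-out that _CreateHealthReport performs according to the docstring; the endpoint URL, the _GetBenchmarks helper, and the queue name are assumptions.

import datetime

def _CreateHealthReport(report_name, num_days, master):
  # Sketch: default the report name to today's date, then queue one task per
  # benchmark back to the same endpoint with the 'benchmark' parameter set.
  report_name = report_name or datetime.date.today().isoformat()
  for benchmark in _GetBenchmarks(master):  # assumed helper
    taskqueue.add(
        url='/create_health_report',  # placeholder URL
        params={
            'benchmark': benchmark,
            'report_name': report_name,
            'num_days': num_days,
            'master': master,
        },
        queue_name='default')  # placeholder queue name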
Example #29
  def post(self):
    datastore_hooks.SetPrivilegedRequest()

    try:
      params = json.loads(self.request.body)
      gcs_file_path = params['gcs_file_path']

      try:
        gcs_file = cloudstorage.open(
            gcs_file_path, 'r', retry_params=_RETRY_PARAMS)
        with DecompressFileWrapper(gcs_file) as decompressing_file:
          histogram_dicts = _LoadHistogramList(decompressing_file)

        gcs_file.close()

        ProcessHistogramSet(histogram_dicts)
      finally:
        cloudstorage.delete(gcs_file_path, retry_params=_RETRY_PARAMS)

    except Exception as e: # pylint: disable=broad-except
      logging.error('Error processing histograms: %r', e.message)
      self.response.out.write(json.dumps({'error': e.message}))
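DecompressFileWrapper is not defined in this example; here is a hedged sketch of a streaming zlib reader it might correspond to, assuming the wrapped GCS file only needs to expose read():

import zlib

class DecompressFileWrapper(object):
  """Sketch of a streaming zlib reader; the real buffering may differ."""

  def __init__(self, source, chunk_size=2 ** 20):
    self._source = source
    self._chunk_size = chunk_size
    self._decompressor = zlib.decompressobj()
    self._buffer = ''

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, tb):
    return False

  def read(self, size):
    # Feed compressed chunks to the decompressor until `size` plain bytes
    # are buffered or the source is exhausted.
    while len(self._buffer) < size:
      chunk = self._source.read(self._chunk_size)
      if not chunk:
        self._buffer += self._decompressor.flush()
        break
      self._buffer += self._decompressor.decompress(chunk)
    result, self._buffer = self._buffer[:size], self._buffer[size:]
    return result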
Example #30
  def post(self):
    """Performs any automatic triaging operations.

    This will include updating Anomaly entities, and checking whether they
    should be marked as "recovered", as well as updating Bug entities, and
    commenting on the issue tracker if all alerts for a bug are recovered.
    """
    datastore_hooks.SetPrivilegedRequest()

    # Handle task queue requests.
    if self.request.get('update_recovered_bug'):
      bug_id = int(self.request.get('bug_id'))
      TriageBugs.UpdateRecoveredBugs(bug_id)
      return

    logging.info('Triaging anomalies')
    TriageAnomalies.Process()
    utils.TickMonitoringCustomMetric('TriageAnomalies')
    logging.info('Triaging bugs')
    TriageBugs.Process()
    utils.TickMonitoringCustomMetric('TriageBugs')
    logging.info('/auto_triage complete')