예제 #1
0
 def post(self):
     """Query for tests, and put ones with no new data on the delete queue.

     Pages through TestMetadata keys with a datastore cursor; when more
     pages remain, a continuation task is enqueued carrying the next cursor.
     A test is queued for deletion when it has no Rows newer than the cutoff
     and no descendant tests.
     """
     datastore_hooks.SetPrivilegedRequest()
     cursor = datastore_query.Cursor(urlsafe=self.request.get('cursor'))
     tests, next_cursor, more = graph_data.TestMetadata.query().fetch_page(
         _TESTS_TO_CHECK_AT_ONCE, keys_only=True, start_cursor=cursor)
     if more:
         taskqueue.add(url='/delete_old_tests',
                       params={'cursor': next_cursor.urlsafe()},
                       queue_name=_TASK_QUEUE_NAME)
     for test in tests:
         # Delete this test if:
         # 1) It has no Rows newer than the cutoff
         # 2) It has no descendant tests
         no_new_rows = False
         last_row = graph_data.Row.query(
             graph_data.Row.parent_test == utils.OldStyleTestKey(
                 test)).order(-graph_data.Row.timestamp).get()
         if last_row:
             if last_row.timestamp < datetime.datetime.today() - _CUTOFF_DATE:
                 no_new_rows = True
         else:
             no_new_rows = True
         descendants = list_tests.GetTestDescendants(test, keys_only=True)
         # Guard the removal: list.remove raises ValueError when the key is
         # absent, and GetTestDescendants is not guaranteed to include the
         # test itself (the DeprecateTestsMapper in this file guards the
         # same call).
         if test in descendants:
             descendants.remove(test)
         if not descendants and no_new_rows:
             taskqueue.add(
                 url='/delete_test_data',
                 params={
                     'test_path': utils.TestPath(test),  # For manual inspection.
                     'test_key': test.urlsafe(),
                 },
                 queue_name=_DELETE_TASK_QUEUE_NAME)
 def get(self):
   """Sends a stoppage-alert email to each sheriff with an alert delay set."""
   datastore_hooks.SetPrivilegedRequest()
   # Only sheriffs with a positive stoppage_alert_delay get emailed.
   delay_filter = sheriff.Sheriff.stoppage_alert_delay > 0
   for recipient in sheriff.Sheriff.query(delay_filter):
     _SendStoppageAlertEmail(recipient)
예제 #3
0
 def post(self):
   """Runs auto bisects, or renders job statistics when requested."""
   # A 'stats' query parameter switches this handler to reporting mode.
   wants_stats = 'stats' in self.request.query_string
   if wants_stats:
     self.RenderHtml('result.html', _PrintStartedAndFailedBisectJobs())
   else:
     datastore_hooks.SetPrivilegedRequest()
     _RestartFailedBisectJobs()
예제 #4
0
 def post(self):
     """Re-saves the entities named by the 'keys' request parameter.

     The parameter is a comma-separated list of urlsafe entity keys; every
     named entity is fetched and written back unchanged.
     """
     datastore_hooks.SetPrivilegedRequest()
     entity_keys = []
     for urlsafe_key in self.request.get('keys').split(','):
         entity_keys.append(ndb.Key(urlsafe=urlsafe_key))
     ndb.put_multi(ndb.get_multi(entity_keys))
예제 #5
0
    def post(self):
        """Starts migration of old Test entity names to new ones.

        The kick-off form supplies old_pattern and new_pattern (test path
        pattern strings); task-queue invocations instead supply old_test_key
        and new_test_key (urlsafe keys of Test entities).
        """
        datastore_hooks.SetPrivilegedRequest()

        old_pattern = self.request.get('old_pattern')
        new_pattern = self.request.get('new_pattern')
        old_test_key = self.request.get('old_test_key')
        new_test_key = self.request.get('new_test_key')

        if old_pattern and new_pattern:
            try:
                _AddTasksForPattern(old_pattern, new_pattern)
            except BadInputPatternError as error:
                self.ReportError('Error: %s' % error.message, status=400)
            else:
                self.RenderHtml(
                    'result.html',
                    {'headline': 'Test name migration task started.'})
        elif old_test_key and new_test_key:
            _MigrateOldTest(old_test_key, new_test_key)
        else:
            self.ReportError(
                'Missing required parameters of /migrate_test_names.')
    def get(self):
        """Cron entry point: checks bisect try jobs and reports results.

        Expects no parameters and produces no output. Runs _CheckJob on
        every started/pending TryJob and ticks a monitoring metric only when
        no job raised an exception.
        """
        issue_tracker = issue_tracker_service.IssueTrackerService(
            additional_credentials=utils.ServiceAccountCredentials())

        # Privilege is required to also fetch internal try_job entities.
        datastore_hooks.SetPrivilegedRequest()

        pending_jobs = try_job.TryJob.query(
            try_job.TryJob.status.IN(['started', 'pending'])).fetch()

        had_errors = False
        for job in pending_jobs:
            try:
                _CheckJob(job, issue_tracker)
            except Exception as e:  # pylint: disable=broad-except
                logging.error('Caught Exception %s: %s\n%s',
                              type(e).__name__, e, traceback.format_exc())
                had_errors = True

        if not had_errors:
            utils.TickMonitoringCustomMetric('UpdateBugWithResults')
예제 #7
0
    def post(self):
        """Recursively deletes TestMetadata and Row data.

        The kick-off form supplies 'pattern' (a test path pattern string);
        task-queue invocations instead supply 'test_key' (a urlsafe
        TestMetadata key).
        """
        datastore_hooks.SetPrivilegedRequest()

        pattern = self.request.get('pattern')
        test_key = self.request.get('test_key')

        # Neither parameter given: nothing to do but report the error.
        if not pattern and not test_key:
            self.ReportError(
                'Missing required parameters of /delete_test_data.')
            return

        if pattern:
            try:
                _AddTasksForPattern(pattern)
                self.RenderHtml('result.html',
                                {'headline': 'Test deletion task started.'})
            except BadInputPatternError as error:
                self.ReportError('Error: %s' % error.message, status=400)
        else:
            _DeleteTest(test_key)
def DeprecateTestsMapper(entity):
    """Marks a TestMetadata entity as deprecated if the last row is too old.

    What is considered "too old" is defined by _DEPRECATION_REVISION_DELTA.
    Tests whose newest Row is older than _REMOVAL_REVISON_DELTA and which
    have no descendant tests are queued for deletion instead of deprecation.

    This mapper doesn't un-deprecate tests if new data has been added; that
    happens in add_point.py.

    Args:
      entity: The TestMetadata entity to check.

    Yields:
      Zero or more datastore mutation operations.
    """
    # Fetch the last row (newest by timestamp) for this test.
    datastore_hooks.SetPrivilegedRequest()
    query = graph_data.Row.query(
        graph_data.Row.parent_test == utils.OldStyleTestKey(entity.key))
    query = query.order(-graph_data.Row.timestamp)
    last_row = query.get()

    # Check if the test should be deleted entirely.
    now = datetime.datetime.now()
    logging.info('checking %s', entity.test_path)
    if not last_row or last_row.timestamp < now - _REMOVAL_REVISON_DELTA:
        descendants = list_tests.GetTestDescendants(entity.key, keys_only=True)
        # The test's own key may appear in its descendants list; drop it so
        # only true children count.
        if entity.key in descendants:
            descendants.remove(entity.key)
        if not descendants:
            logging.info('removing')
            if last_row:
                logging.info('last row timestamp: %s', last_row.timestamp)
            else:
                logging.info('no last row, no descendants')
            taskqueue.add(
                url='/delete_test_data',
                params={
                    'test_path':
                    utils.TestPath(entity.key),  # For manual inspection.
                    'test_key': entity.key.urlsafe(),
                    'notify': 'false',
                },
                queue_name=_DELETE_TASK_QUEUE_NAME)
            # A test queued for deletion yields no further operations.
            return

    if entity.deprecated or not last_row:
        return

    # Deprecate only when the newest row is older than the cutoff.
    if last_row.timestamp < now - _DEPRECATION_REVISION_DELTA:
        for operation in _MarkDeprecated(entity):
            yield operation

    for operation in _CreateStoppageAlerts(entity, last_row):
        yield operation
예제 #9
0
파일: stats.py 프로젝트: zeptonaut/catapult
  def post(self):
    """Task queue task to get stats before/after a revision of a single Test.

    Request parameters:
      revision: A central revision to look around.
      num_around: The number of points before and after the given revision.
      test_key: The urlsafe string of a Test key.
      parent_key: The urlsafe string of a StatContainer key.
    """
    datastore_hooks.SetPrivilegedRequest()

    revision = int(self.request.get('revision'))
    num_around = int(self.request.get('num_around'), 10)
    test_key = ndb.Key(urlsafe=self.request.get('test_key'))
    container_key = ndb.Key(urlsafe=self.request.get('parent_key'))

    # Get the Rows and values before and starting from the given revision.
    before_revs = graph_data.Row.query(
        graph_data.Row.parent_test == test_key,
        graph_data.Row.revision < revision).order(
            -graph_data.Row.revision).fetch(limit=num_around)
    before_vals = [b.value for b in before_revs]
    after_revs = graph_data.Row.query(
        graph_data.Row.parent_test == test_key,
        graph_data.Row.revision >= revision).order(
            graph_data.Row.revision).fetch(limit=num_around)
    after_vals = [a.value for a in after_revs]

    # There may be no Row at the particular revision requested; if so, we use
    # the first revision after the given revision.
    actual_revision = None
    if after_vals:
      actual_revision = after_revs[0].revision

    test = test_key.get()
    improvement_direction = self._ImprovementDirection(test)
    median_before = math_utils.Median(before_vals)
    median_after = math_utils.Median(after_vals)
    # Fix: these previously called math_utils.Median again, so the reported
    # means silently duplicated the medians.
    mean_before = math_utils.Mean(before_vals)
    mean_after = math_utils.Mean(after_vals)
    details = {
        'test_path': utils.TestPath(test_key),
        'improvement_direction': improvement_direction,
        'actual_revision': actual_revision,
        'median_before': '%.2f' % median_before,
        'median_after': '%.2f' % median_after,
        'median_percent_improved': self._PercentImproved(
            median_before, median_after, improvement_direction),
        'mean_before': '%.2f' % mean_before,
        'mean_after': '%.2f' % mean_after,
        'mean_percent_improved': self._PercentImproved(
            mean_before, mean_after, improvement_direction),
        'std': '%.2f' % math_utils.StandardDeviation(before_vals + after_vals),
    }
    new_stat = IndividualStat(parent=container_key, details=details)
    new_stat.put()
예제 #10
0
    def post(self):
        """Refreshes the cached test suites list (external, then internal)."""
        logging.info('Going to update test suites data.')

        # Update externally-visible test suites data first, before the
        # request is marked privileged.
        UpdateTestSuites(datastore_hooks.EXTERNAL)

        # Elevate privileges, then update internal-only test suites data.
        datastore_hooks.SetPrivilegedRequest()
        UpdateTestSuites(datastore_hooks.INTERNAL)
예제 #11
0
    def post(self):
        """Updates the selected bots' internal_only property.

        POST requests will be made by the task queue; tasks are added to the
        task queue either by a kick-off POST from the front-end form, or by
        this handler itself.

        Request parameters:
          internal_only: "true" if turning on internal_only, else "false".
          bots: Bots to update. Multiple bots parameters are possible; the
              value of each should be a string like "MasterName/platform-name".
          test: An urlsafe Key for a TestMetadata entity.
          cursor: An urlsafe Cursor; this parameter is only given if we're
              part-way through processing a Bot or a TestMetadata.

        Outputs:
          A message to the user if this request was started by the web form,
          or an error message if something went wrong, or nothing.
        """
        # /change_internal_only should be only accessible if one has
        # administrator privileges, so requests are guaranteed to be
        # authorized.
        datastore_hooks.SetPrivilegedRequest()

        # Parse the flag via a small lookup table; anything but the two
        # literal strings is rejected.
        flag_values = {'true': True, 'false': False}
        internal_only_string = self.request.get('internal_only')
        if internal_only_string not in flag_values:
            self.ReportError('No internal_only field')
            return
        internal_only = flag_values[internal_only_string]

        bot_names = self.request.get_all('bots')
        test_key_urlsafe = self.request.get('test')
        cursor = self.request.get('cursor', None)

        if bot_names and len(bot_names) > 1:
            self._UpdateMultipleBots(bot_names, internal_only)
            self.RenderHtml(
                'result.html', {
                    'headline':
                    ('Updating internal_only. This may take some time '
                     'depending on the data to update. Check the task queue '
                     'to determine whether the job is still in progress.'),
                })
        elif bot_names and len(bot_names) == 1:
            self._UpdateBot(bot_names[0], internal_only, cursor=cursor)
        elif test_key_urlsafe:
            self._UpdateTest(test_key_urlsafe, internal_only, cursor=cursor)
예제 #12
0
파일: stats.py 프로젝트: zeptonaut/catapult
  def post(self):
    """Kicks off a task on the task queue to generate the requested stats."""
    if not utils.IsInternalUser():
      self.RenderHtml('result.html', {
          'errors': ['Only logged-in internal users can access stats.']
      })
      return

    datastore_hooks.SetPrivilegedRequest()
    stat_type = self.request.get('type')
    container = StatContainer(stat_type=stat_type)

    # Dispatch on the requested stat type; unknown types fall through and
    # simply redirect without starting any generation.
    starters = {
        'around_revision': self._StartGeneratingStatsAroundRevision,
        'alert_summary': self._StartGeneratingStatsForAlerts,
    }
    starter = starters.get(stat_type)
    if starter:
      starter(container)
    self.redirect('/stats?key=%s' % container.key.urlsafe())
예제 #13
0
  def post(self):
    """Performs any automatic triaging operations.

    This will include updating Anomaly entities, and checking whether they
    should be marked as "recovered", as well as updating Bug entities, and
    commenting on the issue tracker if all alerts for a bug are recovered.
    """
    datastore_hooks.SetPrivilegedRequest()

    # Task-queue requests carry update_recovered_bug; handle them and stop.
    if self.request.get('update_recovered_bug'):
      TriageBugs.UpdateRecoveredBugs(int(self.request.get('bug_id')))
    else:
      TriageAnomalies.Process()
      TriageBugs.Process()
    def post(self):
        """Validates data parameter and saves to TryJob entity.

        Bisect results come from a "data" parameter, which is a JSON encoding
        of a dictionary. The required fields are "master", "bot", "test".

        Request parameters:
          data: JSON encoding of a dictionary.

        Outputs:
          Empty 200 response with if successful,
          200 response with warning message if optional data is invalid,
          403 response with error message if sender IP is not white-listed,
          400 response with error message if required data is invalid.
          500 with error message otherwise.
        """
        datastore_hooks.SetPrivilegedRequest()
        if not self._CheckIpAgainstWhitelist():
            return

        data = self.request.get('data')
        if not data:
            self.ReportError('Missing "data" parameter.', status=400)
            return

        try:
            # Parse the value fetched above rather than re-reading the
            # request parameter a second time.
            data = json.loads(data)
        except ValueError:
            self.ReportError('Invalid JSON string.', status=400)
            return

        logging.info('Received data: %s', data)

        try:
            _ValidateResultsData(data)
            job = _GetTryJob(data)
            if not job:
                self.ReportWarning('No try job found.')
                return
            _UpdateTryJob(job, data)
            update_bug_with_results.UpdateQuickLog(job)
        except BadRequestError as error:
            self.ReportError(error.message, status=400)
예제 #15
0
    def post(self):
        """Adds a set of points from the post data.

        Request parameters:
          data: JSON encoding of a list of dictionaries. Each dictionary
              represents one point to add. For each dict, one Row entity will
              be added, and any required Test or Master or Bot entities will
              be created.
        """
        datastore_hooks.SetPrivilegedRequest()

        data = json.loads(self.request.get('data'))
        # Batch-prefetch the entities the rows will touch, to warm caches.
        _PrewarmGets(data)

        bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)

        all_put_futures = []
        added_rows = []
        monitored_test_keys = []
        for row_dict in data:
            try:
                new_row, parent_test, put_futures = _AddRow(
                    row_dict, bot_whitelist)
                added_rows.append(new_row)
                # A test is monitored when it has a sheriff and existing rows.
                is_monitored = parent_test.sheriff and parent_test.has_rows
                if is_monitored:
                    monitored_test_keys.append(parent_test.key)
                all_put_futures.extend(put_futures)

            except add_point.BadRequestError as e:
                # An invalid point is skipped; the rest are still processed.
                logging.error('Could not add %s, it was invalid.', e.message)
            except datastore_errors.BadRequestError as e:
                # A datastore failure aborts the whole batch immediately.
                logging.error('Datastore request failed: %s.', e.message)
                return

        ndb.Future.wait_all(all_put_futures)

        # Updating of the cached graph revisions should happen after put because
        # it requires the new row to have a timestamp, which happens upon put.
        graph_revisions.AddRowsToCache(added_rows)

        for test_key in monitored_test_keys:
            if not _IsRefBuild(test_key):
                find_anomalies.ProcessTest(test_key)
            else:
                logging.warn('Ref data marked as monitored: %s', str(test_key))
예제 #16
0
파일: mr.py 프로젝트: tigerqiu712/catapult
def DeprecateTestsMapper(entity):
    """Marks a TestMetadata entity as deprecated if the last row is too old.

    What is considered "too old" is defined by _OLDEST_REVISION_DELTA. Also,
    if all of the subtests in a test have been marked as deprecated, then that
    parent test will be marked as deprecated.

    This mapper doesn't un-deprecate tests if new data has been added; that
    happens in add_point.py.

    Args:
      entity: The TestMetadata entity to check.

    Yields:
      Zero or more datastore mutation operations.
    """
    # Make sure that we have a non-deprecated TestMetadata with Rows.
    if (entity.key.kind() != 'TestMetadata' or not entity.has_rows
            or entity.deprecated):
        # TODO(qyearsley): Add test coverage. See catapult:#1346.
        logging.error(
            'Got bad entity in mapreduce! Kind: %s, has_rows: %s, deprecated: %s',
            entity.key.kind(), entity.has_rows, entity.deprecated)
        return

    # Fetch the last row (newest by timestamp) for this test.
    datastore_hooks.SetPrivilegedRequest()
    query = graph_data.Row.query(
        graph_data.Row.parent_test == utils.OldStyleTestKey(entity.key))
    query = query.order(-graph_data.Row.timestamp)
    last_row = query.get()
    if not last_row:
        # TODO(qyearsley): Add test coverage. See catapult:#1346.
        logging.error('No rows for %s (but has_rows=True)', entity.key)
        return

    # Deprecate only when the newest row is older than the cutoff.
    now = datetime.datetime.now()
    if last_row.timestamp < now - _OLDEST_REVISION_DELTA:
        for operation in _MarkDeprecated(entity):
            yield operation

    for operation in _CreateStoppageAlerts(entity, last_row):
        yield operation
예제 #17
0
파일: stats.py 프로젝트: zeptonaut/catapult
  def post(self):
    """Task queue task to process a single day's alerts for a sheriff."""
    datastore_hooks.SetPrivilegedRequest()
    container_key = ndb.Key(urlsafe=self.request.get('parent_key'))
    sheriff_key = ndb.Key('Sheriff', self.request.get('sheriff'))
    year, month, day = (int(self.request.get(p))
                        for p in ('year', 'month', 'day'))

    # Fetch every alert for this sheriff whose timestamp falls on that day.
    day_start = datetime.datetime(year, month, day)
    day_end = day_start + datetime.timedelta(days=1)
    alerts = anomaly.Anomaly.query(
        anomaly.Anomaly.timestamp >= day_start,
        anomaly.Anomaly.timestamp < day_end,
        anomaly.Anomaly.sheriff == sheriff_key).fetch()

    details = collections.defaultdict(dict)
    details['date'] = '%s-%s-%s' % (year, month, day)
    for alert in alerts:
      self._AddAlert(alert, details)
    IndividualStat(parent=container_key, details=details).put()
예제 #18
0
    def get(self):
        """Cron entry point: checks started bisect jobs and reports results.

        Expects no parameters and produces no output. Runs _CheckJob on every
        started TryJob and ticks a monitoring metric only when no job raised
        an exception.
        """
        config = rietveld_service.GetDefaultRietveldConfig()
        credentials = rietveld_service.Credentials(
            config, rietveld_service.PROJECTHOSTING_SCOPE)
        issue_tracker = issue_tracker_service.IssueTrackerService(
            additional_credentials=credentials)

        # Set privilege so we can also fetch internal try_job entities.
        datastore_hooks.SetPrivilegedRequest()

        started_jobs = try_job.TryJob.query(
            try_job.TryJob.status == 'started').fetch()
        had_errors = False
        for job in started_jobs:
            try:
                if job.use_buildbucket:
                    logging.info('Checking job %s with Buildbucket job ID %s.',
                                 job.key.id(),
                                 getattr(job, 'buildbucket_job_id', None))
                else:
                    logging.info('Checking job %s with Rietveld issue ID %s.',
                                 job.key.id(),
                                 getattr(job, 'rietveld_issue_id', None))
                _CheckJob(job, issue_tracker)
            except Exception as e:  # pylint: disable=broad-except
                logging.error('Caught Exception %s: %s\n%s',
                              type(e).__name__, e, traceback.format_exc())
                had_errors = True
        if not had_errors:
            utils.TickMonitoringCustomMetric('UpdateBugWithResults')
예제 #19
0
 def post(self):
     """Runs bisect integration tests as a privileged request."""
     # Elevate privileges first — presumably the integration tests need
     # access to internal-only entities; confirm against the callee.
     datastore_hooks.SetPrivilegedRequest()
     _RunBisectIntegrationTests()
예제 #20
0
 def testGet_PrivilegedRequest(self):
   """Internal data is returned when the request is marked privileged."""
   # No signed-in user: access must come solely from the privileged flag.
   self.UnsetCurrentUser()
   datastore_hooks.SetPrivilegedRequest()
   self._CheckGet(include_internal=True)
예제 #21
0
  def post(self):
    """Validates data parameter and add task to queue to process points.

    The row data comes from a "data" parameter, which is a JSON encoding of a
    list of dictionaries, each of which represents one performance result
    (one point in a graph) and associated data.

      [
        {
          "master": "ChromiumPerf",
          "bot": "xp-release-dual-core",
          "test": "dromaeo/dom/modify",
          "revision": 123456789,
          "value": 24.66,
          "error": 2.33,
          "units": "ms",
          "supplemental_columns": {
            "d_median": 24234.12,
            "d_mean": 23.553,
            "r_webkit": 423340,
            ...
          },
          ...
        },
        ...
      ]

    In general, the required fields are "master", "bot", "test" (which together
    form the test path which identifies the series that this point belongs to),
    and "revision" and "value", which are the X and Y values for the point.

    This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),
    the first producer of which is Telemetry. Telemetry provides lightweight
    serialization of values it produces, as JSON. If a dashboard JSON object is
    passed, it will be a single dict rather than a list, with the test,
    value, error, and units fields replaced by a chart_data field containing a
    Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is
    processed by converting it into rows (which can be viewed as Dashboard JSON
    v0).

    {
      "master": "ChromiumPerf",
      <other row fields>,
      "chart_data": {
        "foo": {
          "bar": {
            "type": "scalar",
            "name": "foo.bar",
            "units": "ms",
            "value": 4.2,
          },
          "summary": {
            "type": "list_of_scalar_values",
            "name": "foo",
            "units": "ms",
            "values": [4.2, 5.7, 6.8],
            "std": 1.30512,
          },
      },
    }

    Request parameters:
      data: JSON encoding of a list of dictionaries.

    Outputs:
      Empty 200 response with if successful,
      200 response with warning message if optional data is invalid,
      403 response with error message if sender IP is not white-listed,
      400 response with error message if required data is invalid.
      500 with error message otherwise.
    """
    datastore_hooks.SetPrivilegedRequest()
    if not self._CheckIpAgainstWhitelist():
      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
      return

    data = self.request.get('data')
    if not data:
      # TODO(qyearsley): Add test coverage. See http://crbug.com/447432
      self.ReportError('Missing "data" parameter.', status=400)
      return

    try:
      # Parse the value fetched above rather than re-reading the request
      # parameter a second time.
      data = json.loads(data)
    except ValueError:
      self.ReportError('Invalid JSON string.', status=400)
      return

    logging.info('Received data: %s', data)

    try:
      # json.loads yields exactly dict or list here, so isinstance is an
      # equivalent, idiomatic form of the original exact-type check.
      if isinstance(data, dict):
        if data.get('chart_data'):
          data = _DashboardJsonToRawRows(data)
        else:
          self.ReportError(
              'Data should be a list of rows or a Dashboard JSON v1.0 dict.',
              status=400)
          return
      test_map = _ConstructTestPathMap(data)
      for row_dict in data:
        _ValidateRowDict(row_dict, test_map)
      _AddTasksAsync(data)
    except BadRequestError as error:
      # If any of the data was invalid, abort immediately and return an error.
      self.ReportError(error.message, status=400)
예제 #22
0
 def testQuery_PrivilegedRequest_InternalOnlyFetched(self):
   """Internal-only entities are fetched when the request is privileged."""
   # No signed-in user: access must come solely from the privileged flag.
   self.UnsetCurrentUser()
   datastore_hooks.SetPrivilegedRequest()
   self._CheckQueryResults(True)